author    | Owen Anderson <resistor@mac.com> | 2009-07-06 23:00:19 +0000
committer | Owen Anderson <resistor@mac.com> | 2009-07-06 23:00:19 +0000
commit    | 07cf79ef537caff6d39145f190a28a336e629b6f (patch)
tree      | 34011c1fc5747bedfc3d52def124a8cc54ebf3e5
parent    | 76f600b205606a055ec35e7d3fd1a99602329d67 (diff)
"LLVMContext* " --> "LLVMContext *"
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@74878 91177308-0d34-0410-b5e6-96231b3b80d8
31 files changed, 109 insertions, 109 deletions
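The change is purely mechanical: in every declaration, "LLVMContext* " becomes "LLVMContext *", so the asterisk binds visually to the declared name rather than to the type, matching how the surrounding code already spaces its other pointer parameters. A minimal before/after illustration of the convention (the function name below is made up for illustration, not taken from the patch):

    // Before: asterisk attached to the type name.
    Constant *foo(Instruction *I, LLVMContext* Context);

    // After: asterisk attached to the parameter name, as throughout this patch.
    Constant *foo(Instruction *I, LLVMContext *Context);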
diff --git a/include/llvm/Analysis/ConstantFolding.h b/include/llvm/Analysis/ConstantFolding.h
index 8007f33609..6c8acf0c27 100644
--- a/include/llvm/Analysis/ConstantFolding.h
+++ b/include/llvm/Analysis/ConstantFolding.h
@@ -29,13 +29,13 @@ namespace llvm {
 /// is returned. Note that this function can only fail when attempting to fold
 /// instructions like loads and stores, which have no constant expression form.
 ///
-Constant *ConstantFoldInstruction(Instruction *I, LLVMContext* Context,
+Constant *ConstantFoldInstruction(Instruction *I, LLVMContext *Context,
                                   const TargetData *TD = 0);
 /// ConstantFoldConstantExpression - Attempt to fold the constant expression
 /// using the specified TargetData. If successful, the constant result is
 /// result is returned, if not, null is returned.
-Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext* Context,
+Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext *Context,
                                          const TargetData *TD = 0);
 /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -46,7 +46,7 @@ Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext* Context,
 ///
 Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
                                    Constant*const * Ops, unsigned NumOps,
-                                   LLVMContext* Context,
+                                   LLVMContext *Context,
                                    const TargetData *TD = 0);
 /// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
@@ -55,7 +55,7 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
 ///
 Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
                                           Constant*const * Ops, unsigned NumOps,
-                                          LLVMContext* Context,
+                                          LLVMContext *Context,
                                           const TargetData *TD = 0);
@@ -63,7 +63,7 @@ Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
 /// getelementptr constantexpr, return the constant value being addressed by the
 /// constant expression, or null if something is funny and we can't decide.
 Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE,
-                                                 LLVMContext* Context);
+                                                 LLVMContext *Context);
 /// canConstantFoldCallTo - Return true if its even possible to fold a call to
 /// the specified function.
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 584a441dcb..36f881140d 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -355,7 +355,7 @@ namespace llvm {
     static char ID; // Pass identification, replacement for typeid
     ScalarEvolution();
-    LLVMContext* getContext() const { return Context; }
+    LLVMContext *getContext() const { return Context; }
     /// isSCEVable - Test if values of the given type are analyzable within
     /// the SCEV framework. This primarily includes integer types, and it
diff --git a/include/llvm/Analysis/SparsePropagation.h b/include/llvm/Analysis/SparsePropagation.h
index 356a5ee693..638008d78c 100644
--- a/include/llvm/Analysis/SparsePropagation.h
+++ b/include/llvm/Analysis/SparsePropagation.h
@@ -131,7 +131,7 @@ class SparseSolver {
   SparseSolver(const SparseSolver&);    // DO NOT IMPLEMENT
   void operator=(const SparseSolver&);  // DO NOT IMPLEMENT
 public:
-  explicit SparseSolver(AbstractLatticeFunction *Lattice, LLVMContext* C)
+  explicit SparseSolver(AbstractLatticeFunction *Lattice, LLVMContext *C)
     : LatticeFunc(Lattice), Context(C) {}
   ~SparseSolver() {
     delete LatticeFunc;
diff --git a/include/llvm/BasicBlock.h b/include/llvm/BasicBlock.h
index 59b93e817a..4f4da1ca2f 100644
--- a/include/llvm/BasicBlock.h
+++ b/include/llvm/BasicBlock.h
@@ -88,7 +88,7 @@ private:
 public:
   /// getContext - Get the context in which this basic block lives,
   /// or null if it is not currently attached to a function.
-  LLVMContext* getContext() const;
+  LLVMContext *getContext() const;
   /// Instruction iterators...
   typedef InstListType::iterator iterator;
diff --git a/include/llvm/Function.h b/include/llvm/Function.h
index db728bff31..04c1442400 100644
--- a/include/llvm/Function.h
+++ b/include/llvm/Function.h
@@ -129,7 +129,7 @@ public:
   /// getContext - Return a pointer to the LLVMContext associated with this
   /// function, or NULL if this function is not bound to a context yet.
-  LLVMContext* getContext() const;
+  LLVMContext *getContext() const;
   /// isVarArg - Return true if this function takes a variable number of
   /// arguments.
diff --git a/include/llvm/Pass.h b/include/llvm/Pass.h
index eea99e028d..51186fa2db 100644
--- a/include/llvm/Pass.h
+++ b/include/llvm/Pass.h
@@ -79,7 +79,7 @@ class Pass {
   Pass(const Pass &);           // DO NOT IMPLEMENT
 protected:
-  LLVMContext* Context;
+  LLVMContext *Context;
 public:
   explicit Pass(intptr_t pid) : Resolver(0), PassID(pid) {
diff --git a/include/llvm/Support/TargetFolder.h b/include/llvm/Support/TargetFolder.h
index 4834a12d0a..a1b63aab84 100644
--- a/include/llvm/Support/TargetFolder.h
+++ b/include/llvm/Support/TargetFolder.h
@@ -30,7 +30,7 @@ class LLVMContext;
 /// TargetFolder - Create constants with target dependent folding.
 class TargetFolder {
   const TargetData *TD;
-  LLVMContext* Context;
+  LLVMContext *Context;
   /// Fold - Fold the constant using target specific information.
   Constant *Fold(Constant *C) const {
@@ -41,7 +41,7 @@ class TargetFolder {
   }
 public:
-  explicit TargetFolder(const TargetData *TheTD, LLVMContext* C) :
+  explicit TargetFolder(const TargetData *TheTD, LLVMContext *C) :
     TD(TheTD), Context(C) {}
   //===--------------------------------------------------------------------===//
diff --git a/include/llvm/Transforms/Utils/PromoteMemToReg.h b/include/llvm/Transforms/Utils/PromoteMemToReg.h
index 3d058002ce..91d51e2539 100644
--- a/include/llvm/Transforms/Utils/PromoteMemToReg.h
+++ b/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -40,7 +40,7 @@ bool isAllocaPromotable(const AllocaInst *AI);
 ///
 void PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
                      DominatorTree &DT, DominanceFrontier &DF,
-                     LLVMContext* Context,
+                     LLVMContext *Context,
                      AliasSetTracker *AST = 0);
 } // End llvm namespace
diff --git a/include/llvm/Transforms/Utils/ValueMapper.h b/include/llvm/Transforms/Utils/ValueMapper.h
index bddf393b33..dd294bbf82 100644
--- a/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/include/llvm/Transforms/Utils/ValueMapper.h
@@ -23,7 +23,7 @@ namespace llvm {
   class LLVMContext;
   typedef DenseMap<const Value *, Value *> ValueMapTy;
-  Value *MapValue(const Value *V, ValueMapTy &VM, LLVMContext* Context);
+  Value *MapValue(const Value *V, ValueMapTy &VM, LLVMContext *Context);
   void RemapInstruction(Instruction *I, ValueMapTy &VM);
 } // End llvm namespace
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index a3d19a8a99..c474fe7a57 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -499,7 +499,7 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
 // This function is used to determine if the indices of two GEP instructions are
 // equal. V1 and V2 are the indices.
-static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext* Context) {
+static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext *Context) {
   if (V1->getType() == V2->getType())
     return V1 == V2;
   if (Constant *C1 = dyn_cast<Constant>(V1))
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index dcfea7fea8..7ebfec3dff 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -94,7 +94,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
 /// otherwise TD is null.
 static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                            Constant *Op1, const TargetData *TD,
-                                           LLVMContext* Context){
+                                           LLVMContext *Context){
   // SROA
   // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
@@ -123,7 +123,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
 /// constant expression, do so.
 static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
                                          const Type *ResultTy,
-                                         LLVMContext* Context,
+                                         LLVMContext *Context,
                                          const TargetData *TD) {
   Constant *Ptr = Ops[0];
   if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
@@ -157,7 +157,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
 /// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
 /// targetdata. Return 0 if unfoldable.
 static Constant *FoldBitCast(Constant *C, const Type *DestTy,
-                             const TargetData &TD, LLVMContext* Context) {
+                             const TargetData &TD, LLVMContext *Context) {
   // If this is a bitcast from constant vector -> vector, fold it.
   if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
     if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
@@ -281,7 +281,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
 /// is returned. Note that this function can only fail when attempting to fold
 /// instructions like loads and stores, which have no constant expression form.
 ///
-Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext* Context,
+Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext *Context,
                                         const TargetData *TD) {
   if (PHINode *PN = dyn_cast<PHINode>(I)) {
     if (PN->getNumIncomingValues() == 0)
@@ -321,7 +321,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext* Context,
 /// using the specified TargetData. If successful, the constant result is
 /// result is returned, if not, null is returned.
 Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
-                                               LLVMContext* Context,
+                                               LLVMContext *Context,
                                                const TargetData *TD) {
   SmallVector<Constant*, 8> Ops;
   for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
@@ -344,7 +344,7 @@ Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
 ///
 Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
                                          Constant* const* Ops, unsigned NumOps,
-                                         LLVMContext* Context,
+                                         LLVMContext *Context,
                                          const TargetData *TD) {
   // Handle easy binops first.
   if (Instruction::isBinaryOp(Opcode)) {
@@ -470,7 +470,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                 Constant*const * Ops, unsigned NumOps,
-                                                LLVMContext* Context,
+                                                LLVMContext *Context,
                                                 const TargetData *TD) {
   // fold: icmp (inttoptr x), null  -> icmp x, 0
   // fold: icmp (ptrtoint x), 0     -> icmp x, null
@@ -543,7 +543,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
 /// constant expression, or null if something is funny and we can't decide.
 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE,
-                                                       LLVMContext* Context) {
+                                                       LLVMContext *Context) {
   if (CE->getOperand(1) != Context->getNullValue(CE->getOperand(1)->getType()))
     return 0;  // Do not allow stepping over the value!
@@ -680,7 +680,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
 }
 static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
-                                const Type *Ty, LLVMContext* Context) {
+                                const Type *Ty, LLVMContext *Context) {
   errno = 0;
   V = NativeFP(V);
   if (errno != 0) {
@@ -699,7 +699,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
 static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                       double V, double W, const Type *Ty,
-                                      LLVMContext* Context) {
+                                      LLVMContext *Context) {
   errno = 0;
   V = NativeFP(V, W);
   if (errno != 0) {
@@ -722,7 +722,7 @@ Constant *
 llvm::ConstantFoldCall(Function *F,
                        Constant* const* Operands, unsigned NumOperands) {
   if (!F->hasName()) return 0;
-  LLVMContext* Context = F->getContext();
+  LLVMContext *Context = F->getContext();
   const char *Str = F->getNameStart();
   unsigned Len = F->getNameLen();
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 80c5540d99..577315879b 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -3421,7 +3421,7 @@ static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
   if (Constant *C = dyn_cast<Constant>(V)) return C;
   if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
   Instruction *I = cast<Instruction>(V);
-  LLVMContext* Context = I->getParent()->getContext();
+  LLVMContext *Context = I->getParent()->getContext();
   std::vector<Constant*> Operands;
   Operands.resize(I->getNumOperands());
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 2d590f5df2..ddf1752449 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -835,7 +835,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
 Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
                          SmallVector<unsigned, 10> &Idxs, unsigned IdxSkip,
-                         LLVMContext* Context,
+                         LLVMContext *Context,
                          Instruction *InsertBefore) {
   const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
   if (STy) {
@@ -914,7 +914,7 @@ Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
 /// If InsertBefore is not null, this function will duplicate (modified)
 /// insertvalues when a part of a nested struct is extracted.
 Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
-                               const unsigned *idx_end, LLVMContext* Context,
+                               const unsigned *idx_end, LLVMContext *Context,
                                Instruction *InsertBefore) {
   // Nothing to index? Just return V then (this is useful at the end of our
   // recursion)
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 0c03676f7d..f394a3af60 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -246,7 +246,7 @@ static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
 }
 static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
-                                             LLVMContext* Context) {
+                                             LLVMContext *Context) {
   ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
   if (!CI) return 0;
   unsigned IdxV = CI->getZExtValue();
@@ -283,7 +283,7 @@ static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
 /// quick scan over the use list to clean up the easy and obvious cruft. This
 /// returns true if it made a change.
 static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
-                                       LLVMContext* Context) {
+                                       LLVMContext *Context) {
   bool Changed = false;
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
     User *U = *UI++;
@@ -465,7 +465,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
 /// this transformation is safe already. We return the first global variable we
 /// insert so that the caller can reprocess it.
 static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
-                                 LLVMContext* Context) {
+                                 LLVMContext *Context) {
   // Make sure this global only has simple uses that we can SRA.
   if (!GlobalUsersSafeToSRA(GV))
     return 0;
@@ -674,7 +674,7 @@ static bool AllUsesOfLoadedValueWillTrapIfNull(GlobalVariable *GV) {
 }
 static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
-                                            LLVMContext* Context) {
+                                            LLVMContext *Context) {
   bool Changed = false;
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
     Instruction *I = cast<Instruction>(*UI++);
@@ -742,7 +742,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
 /// if the loaded value is dynamically null, then we know that they cannot be
 /// reachable with a null optimize away the load.
 static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
-                                            LLVMContext* Context) {
+                                            LLVMContext *Context) {
   bool Changed = false;
   // Keep track of whether we are able to remove all the uses of the global
@@ -796,7 +796,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
 /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
 /// instructions that are foldable.
-static void ConstantPropUsersOf(Value *V, LLVMContext* Context) {
+static void ConstantPropUsersOf(Value *V, LLVMContext *Context) {
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
     if (Instruction *I = dyn_cast<Instruction>(*UI++))
       if (Constant *NewC = ConstantFoldInstruction(I, Context)) {
@@ -817,7 +817,7 @@ static void ConstantPropUsersOf(Value *V, LLVMContext* Context) {
 /// malloc into a global, and any loads of GV as uses of the new global.
 static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                      MallocInst *MI,
-                                                     LLVMContext* Context) {
+                                                     LLVMContext *Context) {
   DOUT << "PROMOTING MALLOC GLOBAL: " << *GV << " MALLOC = " << *MI;
   ConstantInt *NElements = cast<ConstantInt>(MI->getArraySize());
@@ -1131,7 +1131,7 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
 static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                    std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
-                               LLVMContext* Context) {
+                               LLVMContext *Context) {
   std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];
   if (FieldNo >= FieldVals.size())
@@ -1174,7 +1174,7 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
 static void RewriteHeapSROALoadUser(Instruction *LoadUser,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                    std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
-                                    LLVMContext* Context) {
+                                    LLVMContext *Context) {
   // If this is a comparison against null, handle it.
   if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
     assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
@@ -1245,7 +1245,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
 static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                    std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
-                                         LLVMContext* Context) {
+                                         LLVMContext *Context) {
   for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
        UI != E; ) {
     Instruction *User = cast<Instruction>(*UI++);
@@ -1262,7 +1262,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
 /// PerformHeapAllocSRoA - MI is an allocation of an array of structures. Break
 /// it up into multiple allocations of arrays of the fields.
 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,
-                                            LLVMContext* Context){
+                                            LLVMContext *Context){
   DOUT << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *MI;
   const StructType *STy = cast<StructType>(MI->getAllocatedType());
@@ -1442,7 +1442,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                                MallocInst *MI,
                                                Module::global_iterator &GVI,
                                                TargetData &TD,
-                                               LLVMContext* Context) {
+                                               LLVMContext *Context) {
   // If this is a malloc of an abstract type, don't touch it.
   if (!MI->getAllocatedType()->isSized())
     return false;
@@ -1526,7 +1526,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
 // that only one value (besides its initializer) is ever stored to the global.
 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                      Module::global_iterator &GVI,
-                                     TargetData &TD, LLVMContext* Context) {
+                                     TargetData &TD, LLVMContext *Context) {
   // Ignore no-op GEPs and bitcasts.
   StoredOnceVal = StoredOnceVal->stripPointerCasts();
@@ -1558,7 +1558,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
 /// can shrink the global into a boolean and select between the two values
 /// whenever it is used. This exposes the values to other scalar optimizations.
 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
-                                       LLVMContext* Context) {
+                                       LLVMContext *Context) {
   const Type *GVElType = GV->getType()->getElementType();
   // If GVElType is already i1, it is already shrunk. If the type of the GV is
@@ -1941,7 +1941,7 @@ static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
 /// specified array, returning the new global to use.
 static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
                                           const std::vector<Function*> &Ctors,
-                                          LLVMContext* Context) {
+                                          LLVMContext *Context) {
   // If we made a change, reassemble the initializer list.
   std::vector<Constant*> CSVals;
   CSVals.push_back(Context->getConstantInt(Type::Int32Ty, 65535));
@@ -2009,7 +2009,7 @@ static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues,
 /// enough for us to understand. In particular, if it is a cast of something,
 /// we punt. We basically just support direct accesses to globals and GEP's of
 /// globals. This should be kept up to date with CommitValueTo.
-static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext* Context) {
+static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext *Context) {
   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
     if (!GV->hasExternalLinkage() && !GV->hasLocalLinkage())
       return false;  // do not allow weak/linkonce/dllimport/dllexport linkage.
@@ -2034,7 +2034,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext* Context) {
 /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
                                    ConstantExpr *Addr, unsigned OpNo,
-                                   LLVMContext* Context) {
+                                   LLVMContext *Context) {
   // Base case of the recursion.
   if (OpNo == Addr->getNumOperands()) {
     assert(Val->getType() == Init->getType() && "Type mismatch!");
@@ -2097,7 +2097,7 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
 /// CommitValueTo - We have decided that Addr (which satisfies the predicate
 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
 static void CommitValueTo(Constant *Val, Constant *Addr,
-                          LLVMContext* Context) {
+                          LLVMContext *Context) {
   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
     assert(GV->hasInitializer());
     GV->setInitializer(Val);
@@ -2117,7 +2117,7 @@ static void CommitValueTo(Constant *Val, Constant *Addr,
 /// decide, return null.
 static Constant *ComputeLoadResult(Constant *P,
                                    const DenseMap<Constant*, Constant*> &Memory,
-                                   LLVMContext* Context) {
+                                   LLVMContext *Context) {
   // If this memory location has been recently stored, use the stored value: it
   // is the most up-to-date.
   DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
@@ -2156,7 +2156,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
   if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
     return false;
-  LLVMContext* Context = F->getContext();
+  LLVMContext *Context = F->getContext();
   CallStack.push_back(F);
diff --git a/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index 8ae4c4f078..a67c35649f 100644
--- a/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -23,7 +23,7 @@
 void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
                                    GlobalValue *Array) {
-  LLVMContext* Context = MainFn->getContext();
+  LLVMContext *Context = MainFn->getContext();
   const Type *ArgVTy =
     Context->getPointerTypeUnqual(Context->getPointerTypeUnqual(Type::Int8Ty));
   const PointerType *UIntPtr = Context->getPointerTypeUnqual(Type::Int32Ty);
@@ -101,7 +101,7 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
 void llvm::IncrementCounterInBlock(BasicBlock *BB, unsigned CounterNum,
                                    GlobalValue *CounterArray) {
-  LLVMContext* Context = BB->getContext();
+  LLVMContext *Context = BB->getContext();
   // Insert the increment after any alloca or PHI instructions...
   BasicBlock::iterator InsertPos = BB->getFirstNonPHI();
diff --git a/lib/Transforms/Instrumentation/RSProfiling.cpp b/lib/Transforms/Instrumentation/RSProfiling.cpp
index 5f95f29ea1..e487d2fb47 100644
--- a/lib/Transforms/Instrumentation/RSProfiling.cpp
+++ b/lib/Transforms/Instrumentation/RSProfiling.cpp
@@ -208,7 +208,7 @@ void GlobalRandomCounter::PrepFunction(Function* F) {}
 void GlobalRandomCounter::ProcessChoicePoint(BasicBlock* bb) {
   BranchInst* t = cast<BranchInst>(bb->getTerminator());
-  LLVMContext* Context = bb->getContext();
+  LLVMContext *Context = bb->getContext();
   //decrement counter
   LoadInst* l = new LoadInst(Counter, "counter", t);
@@ -282,7 +282,7 @@ void GlobalRandomCounterOpt::PrepFunction(Function* F) {
 void GlobalRandomCounterOpt::ProcessChoicePoint(BasicBlock* bb) {
   BranchInst* t = cast<BranchInst>(bb->getTerminator());
-  LLVMContext* Context = bb->getContext();
+  LLVMContext *Context = bb->getContext();
   //decrement counter
   LoadInst* l = new LoadInst(AI, "counter", t);
@@ -317,7 +317,7 @@ void CycleCounter::PrepFunction(Function* F) {}
 void CycleCounter::ProcessChoicePoint(BasicBlock* bb) {
   BranchInst* t = cast<BranchInst>(bb->getTerminator());
-  LLVMContext* Context = bb->getContext();
+  LLVMContext *Context = bb->getContext();
   CallInst* c = CallInst::Create(F, "rdcc", t);
   BinaryOperator* b =
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index 46a7b4c30d..01c11ba840 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -83,7 +83,7 @@ namespace {
     static char ID; // Pass identification, replacement for typeid
     InstCombiner() : FunctionPass(&ID) {}
-    LLVMContext* getContext() { return Context; }
+    LLVMContext *getContext() { return Context; }
     /// AddToWorkList - Add the specified instruction to the worklist if it
     /// isn't already in it.
@@ -568,7 +568,7 @@ bool InstCombiner::SimplifyCompare(CmpInst &I) {
 // dyn_castNegVal - Given a 'sub' instruction, return the