author | Victor Hernandez <vhernandez@apple.com> | 2009-10-23 21:09:37 +0000
---|---|---
committer | Victor Hernandez <vhernandez@apple.com> | 2009-10-23 21:09:37 +0000
commit | 7b929dad59785f62a66f7c58615082f98441e95e (patch) |
tree | fe3eb2cd3b56e7c3e89454d73e56986f3ce12ba2 |
parent | 4ab74cdc124af6b4f57c2d2d09548e01d64a1f34 (diff) |
Remove AllocationInst. Since MallocInst went away, AllocaInst is the only subclass of AllocationInst, so it is no longer necessary.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@84969 91177308-0d34-0410-b5e6-96231b3b80d8
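For out-of-tree code, the migration this commit implies is mechanical: every `isa`/`cast`/`dyn_cast` query against `AllocationInst` becomes the same query against `AllocaInst`, and the `AllocaInst` accessors themselves are unchanged. A minimal sketch of the pattern, assuming the header layout shown in this diff; the helper names are illustrative, not part of the patch:

```cpp
#include "llvm/Instructions.h"    // AllocaInst (tree layout as of this commit)
#include "llvm/Support/Casting.h" // isa<>, dyn_cast<>

using namespace llvm;

// Before this commit: isa<AllocationInst>(V). AllocaInst was the only
// remaining subclass, so the two predicates already matched the same values.
static bool isLocalStackAllocation(const Value *V) {  // hypothetical helper
  return isa<AllocaInst>(V);
}

// dyn_cast migrates the same way; accessors such as getAllocatedType,
// getArraySize, and getAlignment are untouched by this commit.
static const Type *allocatedTypeOrNull(const Value *V) {  // hypothetical helper
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
    return AI->getAllocatedType();
  return 0;
}
```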
-rw-r--r-- | include/llvm-c/Core.h | 3
-rw-r--r-- | include/llvm/Instructions.h | 82
-rw-r--r-- | include/llvm/Support/InstVisitor.h | 3
-rw-r--r-- | lib/Analysis/AliasAnalysis.cpp | 2
-rw-r--r-- | lib/Analysis/BasicAliasAnalysis.cpp | 8
-rw-r--r-- | lib/Analysis/IPA/Andersens.cpp | 15
-rw-r--r-- | lib/Analysis/InlineCost.cpp | 2
-rw-r--r-- | lib/Analysis/MemoryDependenceAnalysis.cpp | 2
-rw-r--r-- | lib/Analysis/PointerTracking.cpp | 2
-rw-r--r-- | lib/Analysis/ValueTracking.cpp | 2
-rw-r--r-- | lib/ExecutionEngine/Interpreter/Execution.cpp | 2
-rw-r--r-- | lib/ExecutionEngine/Interpreter/Interpreter.h | 2
-rw-r--r-- | lib/Transforms/Scalar/GVN.cpp | 8
-rw-r--r-- | lib/Transforms/Scalar/InstructionCombining.cpp | 18
-rw-r--r-- | lib/Transforms/Scalar/SCCP.cpp | 2
-rw-r--r-- | lib/Transforms/Scalar/ScalarReplAggregates.cpp | 61
-rw-r--r-- | lib/Transforms/Scalar/TailDuplication.cpp | 2
-rw-r--r-- | lib/VMCore/AsmWriter.cpp | 2
-rw-r--r-- | lib/VMCore/Instruction.cpp | 2
-rw-r--r-- | lib/VMCore/Instructions.cpp | 60
-rw-r--r-- | lib/VMCore/Verifier.cpp | 4

21 files changed, 138 insertions, 146 deletions
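Beyond the type renames, the InstVisitor change in the diff below removes the `visitAllocationInst` delegation level, so visitor subclasses that overrode it must override `visitAllocaInst` instead. A hedged sketch of an affected client; the `CountAllocas` visitor is hypothetical, not from this patch:

```cpp
#include "llvm/Instructions.h"        // AllocaInst
#include "llvm/Support/InstVisitor.h" // InstVisitor (CRTP base)

using namespace llvm;

// A visitor that used to override visitAllocationInst. After this commit,
// visitAllocaInst delegates straight to visitInstruction, so the override
// moves to visitAllocaInst.
struct CountAllocas : public InstVisitor<CountAllocas> {
  unsigned Count;
  CountAllocas() : Count(0) {}
  void visitAllocaInst(AllocaInst &I) { ++Count; } // was visitAllocationInst
};

// Usage: CountAllocas CA; CA.visit(F);  // F is a Function&
```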
diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h
index 353cab20b2..fd8c6d31f8 100644
--- a/include/llvm-c/Core.h
+++ b/include/llvm-c/Core.h
@@ -455,8 +455,7 @@ void LLVMDisposeTypeHandle(LLVMTypeHandleRef TypeHandle);
       macro(UnreachableInst)            \
       macro(UnwindInst)                 \
     macro(UnaryInstruction)             \
-      macro(AllocationInst)             \
-        macro(AllocaInst)               \
+      macro(AllocaInst)                 \
       macro(CastInst)                   \
         macro(BitCastInst)              \
         macro(FPExtInst)                \
diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h
index dbeb9e12ee..9a990e7eb9 100644
--- a/include/llvm/Instructions.h
+++ b/include/llvm/Instructions.h
@@ -34,22 +34,28 @@ class LLVMContext;
 class DominatorTree;
 
 //===----------------------------------------------------------------------===//
-//                             AllocationInst Class
+//                                AllocaInst Class
 //===----------------------------------------------------------------------===//
 
-/// AllocationInst - This class is the base class of AllocaInst.
+/// AllocaInst - an instruction to allocate memory on the stack
 ///
-class AllocationInst : public UnaryInstruction {
-protected:
-  AllocationInst(const Type *Ty, Value *ArraySize,
-                 unsigned iTy, unsigned Align, const Twine &Name = "",
-                 Instruction *InsertBefore = 0);
-  AllocationInst(const Type *Ty, Value *ArraySize,
-                 unsigned iTy, unsigned Align, const Twine &Name,
-                 BasicBlock *InsertAtEnd);
+class AllocaInst : public UnaryInstruction {
 public:
+  explicit AllocaInst(const Type *Ty, Value *ArraySize = 0,
+                      const Twine &Name = "", Instruction *InsertBefore = 0);
+  AllocaInst(const Type *Ty, Value *ArraySize,
+             const Twine &Name, BasicBlock *InsertAtEnd);
+
+  AllocaInst(const Type *Ty, const Twine &Name, Instruction *InsertBefore = 0);
+  AllocaInst(const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd);
+
+  AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+             const Twine &Name = "", Instruction *InsertBefore = 0);
+  AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+             const Twine &Name, BasicBlock *InsertAtEnd);
+
   // Out of line virtual method, so the vtable, etc. has a home.
-  virtual ~AllocationInst();
+  virtual ~AllocaInst();
 
   /// isArrayAllocation - Return true if there is an allocation size parameter
   /// to the allocation instruction that is not 1.
@@ -79,63 +85,13 @@ public:
   unsigned getAlignment() const { return (1u << SubclassData) >> 1; }
   void setAlignment(unsigned Align);
 
-  virtual AllocationInst *clone() const = 0;
-
-  // Methods for support type inquiry through isa, cast, and dyn_cast:
-  static inline bool classof(const AllocationInst *) { return true; }
-  static inline bool classof(const Instruction *I) {
-    return I->getOpcode() == Instruction::Alloca;
-  }
-  static inline bool classof(const Value *V) {
-    return isa<Instruction>(V) && classof(cast<Instruction>(V));
-  }
-};
-
-
-//===----------------------------------------------------------------------===//
-//                                AllocaInst Class
-//===----------------------------------------------------------------------===//
-
-/// AllocaInst - an instruction to allocate memory on the stack
-///
-class AllocaInst : public AllocationInst {
-public:
-  explicit AllocaInst(const Type *Ty,
-                      Value *ArraySize = 0,
-                      const Twine &NameStr = "",
-                      Instruction *InsertBefore = 0)
-    : AllocationInst(Ty, ArraySize, Alloca,
-                     0, NameStr, InsertBefore) {}
-  AllocaInst(const Type *Ty,
-             Value *ArraySize, const Twine &NameStr,
-             BasicBlock *InsertAtEnd)
-    : AllocationInst(Ty, ArraySize, Alloca, 0, NameStr, InsertAtEnd) {}
-
-  AllocaInst(const Type *Ty, const Twine &NameStr,
-             Instruction *InsertBefore = 0)
-    : AllocationInst(Ty, 0, Alloca, 0, NameStr, InsertBefore) {}
-  AllocaInst(const Type *Ty, const Twine &NameStr,
-             BasicBlock *InsertAtEnd)
-    : AllocationInst(Ty, 0, Alloca, 0, NameStr, InsertAtEnd) {}
-
-  AllocaInst(const Type *Ty, Value *ArraySize,
-             unsigned Align, const Twine &NameStr = "",
-             Instruction *InsertBefore = 0)
-    : AllocationInst(Ty, ArraySize, Alloca,
-                     Align, NameStr, InsertBefore) {}
-  AllocaInst(const Type *Ty, Value *ArraySize,
-             unsigned Align, const Twine &NameStr,
-             BasicBlock *InsertAtEnd)
-    : AllocationInst(Ty, ArraySize, Alloca,
-                     Align, NameStr, InsertAtEnd) {}
-
-  virtual AllocaInst *clone() const;
-
   /// isStaticAlloca - Return true if this alloca is in the entry block of the
   /// function and is a constant size.  If so, the code generator will fold it
   /// into the prolog/epilog code, so it is basically free.
   bool isStaticAlloca() const;
 
+  virtual AllocaInst *clone() const;
+
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static inline bool classof(const AllocaInst *) { return true; }
   static inline bool classof(const Instruction *I) {
diff --git a/include/llvm/Support/InstVisitor.h b/include/llvm/Support/InstVisitor.h
index 440657cfef..1e34e6633d 100644
--- a/include/llvm/Support/InstVisitor.h
+++ b/include/llvm/Support/InstVisitor.h
@@ -165,7 +165,7 @@ public:
   RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);}
   RetTy visitICmpInst(ICmpInst &I)               { DELEGATE(CmpInst);}
   RetTy visitFCmpInst(FCmpInst &I)               { DELEGATE(CmpInst);}
-  RetTy visitAllocaInst(AllocaInst &I)           { DELEGATE(AllocationInst);}
+  RetTy visitAllocaInst(AllocaInst &I)           { DELEGATE(Instruction); }
   RetTy visitFreeInst(FreeInst &I)               { DELEGATE(Instruction); }
   RetTy visitLoadInst(LoadInst &I)               { DELEGATE(Instruction); }
   RetTy visitStoreInst(StoreInst &I)             { DELEGATE(Instruction); }
@@ -198,7 +198,6 @@ public:
   //
   RetTy visitTerminatorInst(TerminatorInst &I)   { DELEGATE(Instruction); }
   RetTy visitBinaryOperator(BinaryOperator &I)   { DELEGATE(Instruction); }
-  RetTy visitAllocationInst(AllocationInst &I)   { DELEGATE(Instruction); }
   RetTy visitCmpInst(CmpInst &I)                 { DELEGATE(Instruction); }
   RetTy visitCastInst(CastInst &I)               { DELEGATE(Instruction); }
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index c456990d8a..0234965a00 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -239,7 +239,7 @@ bool llvm::isNoAliasCall(const Value *V) {
 /// NoAlias returns
 ///
 bool llvm::isIdentifiedObject(const Value *V) {
-  if (isa<AllocationInst>(V) || isNoAliasCall(V))
+  if (isa<AllocaInst>(V) || isNoAliasCall(V))
     return true;
   if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
     return true;
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 756ffea66b..fa33a2acf7 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -80,7 +80,7 @@ static bool isKnownNonNull(const Value *V) {
 /// object that never escapes from the function.
 static bool isNonEscapingLocalObject(const Value *V) {
   // If this is a local allocation, check to see if it escapes.
-  if (isa<AllocationInst>(V) || isNoAliasCall(V))
+  if (isa<AllocaInst>(V) || isNoAliasCall(V))
     return !PointerMayBeCaptured(V, false);
 
   // If this is an argument that corresponds to a byval or noalias argument,
@@ -104,7 +104,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,
   const Type *AccessTy;
   if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
     AccessTy = GV->getType()->getElementType();
-  } else if (const AllocationInst *AI = dyn_cast<AllocationInst>(V)) {
+  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
     if (!AI->isArrayAllocation())
       AccessTy = AI->getType()->getElementType();
     else
@@ -587,8 +587,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
     return NoAlias;
 
   // Arguments can't alias with local allocations or noalias calls.
-  if ((isa<Argument>(O1) && (isa<AllocationInst>(O2) || isNoAliasCall(O2))) ||
-      (isa<Argument>(O2) && (isa<AllocationInst>(O1) || isNoAliasCall(O1))))
+  if ((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
+      (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1))))
     return NoAlias;
 
   // Most objects can't alias null.
diff --git a/lib/Analysis/IPA/Andersens.cpp b/lib/Analysis/IPA/Andersens.cpp
index 1c9159dfbf..b5129a7d3b 100644
--- a/lib/Analysis/IPA/Andersens.cpp
+++ b/lib/Analysis/IPA/Andersens.cpp
@@ -594,11 +594,12 @@ namespace {
     void visitReturnInst(ReturnInst &RI);
     void visitInvokeInst(InvokeInst &II) { visitCallSite(CallSite(&II)); }
     void visitCallInst(CallInst &CI) {
-      if (isMalloc(&CI)) visitAllocationInst(CI);
+      if (isMalloc(&CI)) visitAlloc(CI);
       else visitCallSite(CallSite(&CI));
     }
     void visitCallSite(CallSite CS);
-    void visitAllocationInst(Instruction &I);
+    void visitAllocaInst(AllocaInst &I);
+    void visitAlloc(Instruction &I);
     void visitLoadInst(LoadInst &LI);
     void visitStoreInst(StoreInst &SI);
     void visitGetElementPtrInst(GetElementPtrInst &GEP);
@@ -792,7 +793,7 @@ void Andersens::IdentifyObjects(Module &M) {
       // object.
       if (isa<PointerType>(II->getType())) {
         ValueNodes[&*II] = NumObjects++;
-        if (AllocationInst *AI = dyn_cast<AllocationInst>(&*II))
+        if (AllocaInst *AI = dyn_cast<AllocaInst>(&*II))
           ObjectNodes[AI] = NumObjects++;
         else if (isMalloc(&*II))
           ObjectNodes[&*II] = NumObjects++;
@@ -1167,7 +1168,11 @@ void Andersens::visitInstruction(Instruction &I) {
   }
 }
 
-void Andersens::visitAllocationInst(Instruction &I) {
+void Andersens::visitAllocaInst(AllocaInst &I) {
+  visitAlloc(I);
+}
+
+void Andersens::visitAlloc(Instruction &I) {
   unsigned ObjectIndex = getObject(&I);
   GraphNodes[ObjectIndex].setValue(&I);
   Constraints.push_back(Constraint(Constraint::AddressOf, getNodeValue(I),
@@ -2819,7 +2824,7 @@ void Andersens::PrintNode(const Node *N) const {
   else
     errs() << "(unnamed)";
 
-  if (isa<GlobalValue>(V) || isa<AllocationInst>(V) || isMalloc(V))
+  if (isa<GlobalValue>(V) || isa<AllocaInst>(V) || isMalloc(V))
     if (N == &GraphNodes[getObject(V)])
       errs() << "<mem>";
 }
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index b833baaced..4f010b658e 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -50,7 +50,7 @@ unsigned InlineCostAnalyzer::FunctionInfo::
       // Unfortunately, we don't know the pointer that may get propagated here,
       // so we can't make this decision.
       if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
-          isa<AllocationInst>(Inst))
+          isa<AllocaInst>(Inst))
         continue;
 
       bool AllOperandsConstant = true;
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index d6400757a5..c3aa5bc930 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -229,7 +229,7 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
     // a subsequent bitcast of the malloc call result.  There can be stores to
     // the malloced memory between the malloc call and its bitcast uses, and we
     // need to continue scanning until the malloc call.
-    if (isa<AllocationInst>(Inst) || extractMallocCall(Inst)) {
+    if (isa<AllocaInst>(Inst) || extractMallocCall(Inst)) {
      Value *AccessPtr = MemPtr->getUnderlyingObject();
 
      if (AccessPtr == Inst ||
diff --git a/lib/Analysis/PointerTracking.cpp b/lib/Analysis/PointerTracking.cpp
index 2309fbc952..4abd6fe26a 100644
--- a/lib/Analysis/PointerTracking.cpp
+++ b/lib/Analysis/PointerTracking.cpp
@@ -93,7 +93,7 @@ bool PointerTracking::doInitialization(Module &M) {
 const SCEV *PointerTracking::computeAllocationCount(Value *P,
                                                     const Type *&Ty) const {
   Value *V = P->stripPointerCasts();
-  if (AllocationInst *AI = dyn_cast<AllocationInst>(V)) {
+  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
     Value *arraySize = AI->getArraySize();
     Ty = AI->getAllocatedType();
     // arraySize elements of type Ty.
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index dc0d489047..5672510a72 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -470,7 +470,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
   }
 
   case Instruction::Alloca: {
-    AllocationInst *AI = cast<AllocationInst>(V);
+    AllocaInst *AI = cast<AllocaInst>(V);
     unsigned Align = AI->getAlignment();
     if (Align == 0 && TD)
       Align = TD->getABITypeAlignment(AI->getType()->getElementType());
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index f8c775ee7c..151bd00de3 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -720,7 +720,7 @@ void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
 //                     Memory Instruction Implementations
 //===----------------------------------------------------------------------===//
 
-void Interpreter::visitAllocationInst(AllocationInst &I) {
+void Interpreter::visitAllocaInst(AllocaInst &I) {
   ExecutionContext &SF = ECStack.back();
 
   const Type *Ty = I.getType()->getElementType();  // Type to be allocated
diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.h b/lib/ExecutionEngine/Interpreter/Interpreter.h
index e026287bb5..e43a24bda2 100644
--- a/lib/ExecutionEngine/Interpreter/Interpreter.h
+++ b/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -139,7 +139,7 @@ public:
   void visitBinaryOperator(BinaryOperator &I);
   void visitICmpInst(ICmpInst &I);
   void visitFCmpInst(FCmpInst &I);
-  void visitAllocationInst(AllocationInst &I);
+  void visitAllocaInst(AllocaInst &I);
   void visitFreeInst(FreeInst &I);
   void visitLoadInst(LoadInst &I);
   void visitStoreInst(StoreInst &I);
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 8859324914..00911b5ed4 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -1243,7 +1243,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
     Instruction *DepInst = DepInfo.getInst();
 
     // Loading the allocation -> undef.
-    if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
       ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                              UndefValue::get(LI->getType())));
       continue;
@@ -1585,7 +1585,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
   // If this load really doesn't depend on anything, then we must be loading an
   // undef value.  This can happen when loading for a fresh allocation with no
   // intervening stores, for example.
-  if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    toErase.push_back(L);
    NumGVNLoad++;
@@ -1653,7 +1653,7 @@ bool GVN::processInstruction(Instruction *I,
 
   // Allocations are always uniquely numbered, so we can save time and memory
   // by fast failing them.
-  } else if (isa<AllocationInst>(I) || isa<TerminatorInst>(I)) {
+  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
     localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
     return false;
   }
@@ -1803,7 +1803,7 @@ bool GVN::performPRE(Function& F) {
          BE = CurrentBlock->end(); BI != BE; ) {
       Instruction *CurInst = BI++;
 
-      if (isa<AllocationInst>(CurInst) ||
+      if (isa<AllocaInst>(CurInst) ||
           isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
           CurInst->getType()->isVoidTy() ||
           CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index b41b5d4177..21554c108d 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -284,7 +284,7 @@ namespace {
     Instruction *visitInvokeInst(InvokeInst &II);
     Instruction *visitPHINode(PHINode &PN);
     Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
-    Instruction *visitAllocationInst(AllocationInst &AI);
+    Instruction *visitAllocaInst(AllocaInst &AI);
    Instruction *visitFreeInst(FreeInst &FI);
    Instruction *visitLoadInst(LoadInst &LI);
    Instruction *visitStoreInst(StoreInst &SI);
@@ -425,7 +425,7 @@ namespace {
                               bool isSub, Instruction &I);
     Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                  bool isSigned, bool Inside, Instruction &IB);
-    Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
+    Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
     Instruction *MatchBSwap(BinaryOperator &I);
     bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
     Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
@@ -7745,7 +7745,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
 /// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
 /// try to eliminate the cast by moving the type information into the alloc.
 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
-                                                   AllocationInst &AI) {
+                                                   AllocaInst &AI) {
   const PointerType *PTy = cast<PointerType>(CI.getType());
 
   BuilderTy AllocaBuilder(*Builder);
@@ -7817,7 +7817,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
       Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
   }
 
-  AllocationInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
+  AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
   New->setAlignment(AI.getAlignment());
   New->takeName(&AI);
 
@@ -8878,7 +8878,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
   // size, rewrite the allocation instruction to allocate the "right" type.
   // There is no need to modify malloc calls because it is their bitcast that
   // needs to be cleaned up.
-  if (AllocationInst *AI = dyn_cast<AllocationInst>(Src))
+  if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
     if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
       return V;
 
@@ -11199,7 +11199,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       if (Offset == 0) {
         // If the bitcast is of an allocation, and the allocation will be
         // converted to match the type of the cast, don't touch this.
-        if (isa<AllocationInst>(BCI->getOperand(0)) ||
+        if (isa<AllocaInst>(BCI->getOperand(0)) ||
             isMalloc(BCI->getOperand(0))) {
           // See if the bitcast simplifies, if so, don't nuke this GEP yet.
           if (Instruction *I = visitBitCast(*BCI)) {
@@ -11238,21 +11238,21 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   return 0;
 }
 
-Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
+Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
   // Convert: malloc Ty, C - where C is a constant != 1 into: malloc [C x Ty], 1
   if (AI.isArrayAllocation()) {  // Check C != 1
     if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
       const Type *NewTy =
         ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
       assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
-      AllocationInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
+      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
       New->setAlignment(AI.getAlignment());
 
       // Scan to the end of the allocation instructions, to skip over a block of
       // allocas if possible...also skip interleaved debug info
       //
       BasicBlock::iterator It = New;
-      while (isa<AllocationInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
+      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
 
       // Now that I is pointing to the first non-allocation-inst in the block,
       // insert our getelementptr instruction...
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index b745097872..e3dd54e8bc 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -410,7 +410,7 @@ private:
   void visitCallSite      (CallSite CS);
   void visitUnwindInst    (TerminatorInst &I) { /*returns void*/ }
   void visitUnreachableInst(TerminatorInst &I) { /*returns void*/ }
-  void visitAllocationInst(Instruction &I) { markOverdefined(&I); }
+  void visitAllocaInst    (Instruction &I) { markOverdefined(&I); }
   void visitVANextInst    (Instruction &I) { markOverdefined(&I); }
   void visitVAArgInst     (Instruction &I) { markOverdefined(&I); }
   void visitFreeInst      (Instruction &I) { /*returns void*/ }
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 610d874b36..2e3b6943bb 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -100,32 +100,32 @@ namespace {
 
     void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }
 
-    int isSafeAllocaToScalarRepl(AllocationInst *AI);
+    int isSafeAllocaToScalarRepl(AllocaInst *AI);
 
-    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
+    void isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
                                AllocaInfo &Info);
-    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
+    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
                           AllocaInfo &Info);
-    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
+    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
                                         unsigned OpNo, AllocaInfo &Info);
-    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
+    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocaInst *AI,
                                         AllocaInfo &Info);
-    void DoScalarReplacement(AllocationInst *AI,
-                             std::vector<AllocationInst*> &WorkList);
+    void DoScalarReplacement(AllocaInst *AI,
+                             std::vector<AllocaInst*> &WorkList);
     void CleanupGEP(GetElementPtrInst *GEP);
-    void CleanupAllocaUsers(AllocationInst *AI);
-    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);
+    void CleanupAllocaUsers(AllocaInst *AI);
+    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base);
 
-    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
+    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
                                     SmallVector<AllocaInst*, 32> &NewElts);
     void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
-                                      AllocationInst *AI,
+                                      AllocaInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
-    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
+    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts);
-    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
     bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
@@ -135,7 +135,7 @@ namespace {
                                      uint64_t Offset, IRBuilder<> &Builder);
     Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                      uint64_t Offset, IRBuilder<> &Builder);
-    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
+    static Instruction *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
   };
 }
 
@@ -213,18 +213,18 @@ static uint64_t getNumSAElements(const Type *T) {
 // them if they are only used by getelementptr instructions.
 //
 bool SROA::performScalarRepl(Function &F) {
-  std::vector<AllocationInst*> WorkList;
+  std::vector<AllocaInst*> WorkList;
 
   // Scan the entry basic block, adding any alloca's and mallocs to the worklist
   BasicBlock &BB = F.getEntryBlock();
   for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
-    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
+    if (AllocaInst *A = dyn_cast<AllocaInst>(I))
       WorkList.push_back(A);
 
   // Process the worklist
   bool Changed = false;
   while (!WorkList.empty()) {
-    AllocationInst *AI = WorkList.back();
+    AllocaInst *AI = WorkList.back();
     WorkList.pop_back();
 
     // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
@@ -335,8 +335,8 @@ bool SROA::performScalarRepl(Function &F) {
 
 /// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
 /// predicate, do SROA now.
-void SROA::DoScalarReplacement(AllocationInst *AI,
-                               std::vector<AllocationInst*> &WorkList) {
+void SROA::DoScalarReplacement(AllocaInst *AI,
+                               std::vector<AllocaInst*> &WorkList) {
   DEBUG(errs() << "Found inst to SROA: " << *AI << '\n');
   SmallVector<AllocaInst*, 32> ElementAllocas;
   if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
@@ -455,7 +455,7 @@ void SROA::DoScalarReplacement(AllocationInst *AI,
 /// getelementptr instruction of an array aggregate allocation.  isFirstElt
 /// indicates whether Ptr is known to the start of the aggregate.
 ///
-void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
+void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
                             AllocaInfo &Info) {
   for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
        I != E; ++I) {
@@ -520,7 +520,7 @@ static bool AllUsersAreLoads(Value *Ptr) {
 /// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
 /// aggregate allocation.
 ///
-void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
+void SROA::isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
                                  AllocaInfo &Info) {
   if (BitCastInst *C = dyn_cast<BitCastInst>(User))
     return isSafeUseOfBitCastedAllocation(C, AI, Info);
@@ -605,7 +605,7 @@ void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
 /// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
 /// intrinsic can be promoted by SROA.  At this point, we know that the operand
 /// of the memintrinsic is a pointer to the beginning of the allocation.
-void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
+void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
                                           unsigned OpNo, AllocaInfo &Info) {
   // If not constant length, give up.
   ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
@@ -632,7 +632,7 @@ void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
 /// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
 /// are
-void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
+void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocaInst *AI,
                                           AllocaInfo &Info) {
   for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
        UI != E; ++UI) {
@@ -690,7 +690,7 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
 /// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
 /// to its first element.  Transform users of the cast to use the new values
 /// instead.
-void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
+void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts) {
   Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
   while (UI != UE) {
@@ -729,7 +729,7 @@ void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
 /// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
 /// Rewrite it to copy or set the elements of the scalarized memory.
 void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
-                                        AllocationInst *AI,
+                                        AllocaInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts) {
 
   // If this is a memcpy/memmove, construct the other pointer as the
@@ -905,8 +905,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
 /// RewriteStoreUserOfWholeAlloca - We found an store of an integer that
 /// overwrites the entire allocation.  Extract out the pieces of the stored
 /// integer and store them individually.
-void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
-                                         AllocationInst *AI,
+void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                          SmallVector<AllocaInst*, 32> &NewElts){
   // Extract each element out of the integer according to its structure offset
   // and store the element value to the individual alloca.
@@ -1029,7 +1028,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
 /// RewriteLoadUserOfWholeAlloca - We found an load of the entire allocation to
 /// an integer.  Load the individual pieces to form the aggregate value.
-void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts) {
   // Extract each element out of the NewElts according to its structure offset
   // and form the result value.
@@ -1162,7 +1161,7 @@ static bool HasPadding(c