diff options
author | Nate Begeman <natebegeman@mac.com> | 2005-11-05 09:21:28 +0000 |
---|---|---|
committer | Nate Begeman <natebegeman@mac.com> | 2005-11-05 09:21:28 +0000 |
commit | 14b0529532904b9e5a1e34526b4a3209f3e5bc62 (patch) | |
tree | 43e99ef7bb1cdfbb2828eea5617026f6d426e787 /lib/Transforms | |
parent | ae4664a9f2da955c9d2a3f38b28f0a4395851ace (diff) |
Add support for alignment of allocation instructions.
Add support for specifying alignment and size of setjmp jmpbufs.
No targets currently do anything with this information, nor is it preserved
in the bytecode representation. That's coming up next.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@24196 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Transforms')
-rw-r--r-- | lib/Transforms/IPO/GlobalOpt.cpp | 3 | ||||
-rw-r--r-- | lib/Transforms/Scalar/InstructionCombining.cpp | 8 | ||||
-rw-r--r-- | lib/Transforms/Scalar/ScalarReplAggregates.cpp | 5 | ||||
-rw-r--r-- | lib/Transforms/Utils/LowerInvoke.cpp | 21 |
4 files changed, 21 insertions, 16 deletions
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp index 987703c867..c9c8835e19 100644 --- a/lib/Transforms/IPO/GlobalOpt.cpp +++ b/lib/Transforms/IPO/GlobalOpt.cpp @@ -678,7 +678,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, (unsigned)NElements->getRawValue()); MallocInst *NewMI = new MallocInst(NewTy, Constant::getNullValue(Type::UIntTy), - MI->getName(), MI); + MI->getAlignment(), MI->getName(), MI); std::vector<Value*> Indices; Indices.push_back(Constant::getNullValue(Type::IntTy)); Indices.push_back(Indices[0]); @@ -950,6 +950,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, DEBUG(std::cerr << "LOCALIZING GLOBAL: " << *GV); Instruction* FirstI = GS.AccessingFunction->getEntryBlock().begin(); const Type* ElemTy = GV->getType()->getElementType(); + // FIXME: Pass Global's alignment when globals have alignment AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), FirstI); if (!isa<UndefValue>(GV->getInitializer())) new StoreInst(GV->getInitializer(), Alloca, FirstI); diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp index 1164fb2e03..6934fe27a4 100644 --- a/lib/Transforms/Scalar/InstructionCombining.cpp +++ b/lib/Transforms/Scalar/InstructionCombining.cpp @@ -3936,9 +3936,9 @@ Instruction *InstCombiner::PromoteCastOfAllocation(CastInst &CI, std::string Name = AI.getName(); AI.setName(""); AllocationInst *New; if (isa<MallocInst>(AI)) - New = new MallocInst(CastElTy, Amt, Name); + New = new MallocInst(CastElTy, Amt, AI.getAlignment(), Name); else - New = new AllocaInst(CastElTy, Amt, Name); + New = new AllocaInst(CastElTy, Amt, AI.getAlignment(), Name); InsertNewInstBefore(New, AI); // If the allocation has multiple uses, insert a cast and change all things @@ -5266,10 +5266,10 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) { // Create and insert the replacement instruction... 
if (isa<MallocInst>(AI)) - New = new MallocInst(NewTy, 0, AI.getName()); + New = new MallocInst(NewTy, 0, AI.getAlignment(), AI.getName()); else { assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!"); - New = new AllocaInst(NewTy, 0, AI.getName()); + New = new AllocaInst(NewTy, 0, AI.getAlignment(), AI.getName()); } InsertNewInstBefore(New, AI); diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp index 4a6aee391c..cc03d0ee68 100644 --- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp +++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp @@ -159,7 +159,8 @@ bool SROA::performScalarRepl(Function &F) { if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) { ElementAllocas.reserve(ST->getNumContainedTypes()); for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) { - AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0, + AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0, + AI->getAlignment(), AI->getName() + "." + utostr(i), AI); ElementAllocas.push_back(NA); WorkList.push_back(NA); // Add to worklist for recursive processing @@ -169,7 +170,7 @@ bool SROA::performScalarRepl(Function &F) { ElementAllocas.reserve(AT->getNumElements()); const Type *ElTy = AT->getElementType(); for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { - AllocaInst *NA = new AllocaInst(ElTy, 0, + AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(), AI->getName() + "." 
+ utostr(i), AI); ElementAllocas.push_back(NA); WorkList.push_back(NA); // Add to worklist for recursive processing diff --git a/lib/Transforms/Utils/LowerInvoke.cpp b/lib/Transforms/Utils/LowerInvoke.cpp index 7039a4b7e1..be706c3ef8 100644 --- a/lib/Transforms/Utils/LowerInvoke.cpp +++ b/lib/Transforms/Utils/LowerInvoke.cpp @@ -67,6 +67,8 @@ namespace { GlobalVariable *JBListHead; Function *SetJmpFn, *LongJmpFn; public: + LowerInvoke(unsigned Size = 200, unsigned Align = 0) : JumpBufSize(Size), + JumpBufAlign(Align) {} bool doInitialization(Module &M); bool runOnFunction(Function &F); @@ -78,6 +80,9 @@ namespace { void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo, AllocaInst *InvokeNum, SwitchInst *CatchSwitch); bool insertExpensiveEHSupport(Function &F); + + unsigned JumpBufSize; + unsigned JumpBufAlign; }; RegisterOpt<LowerInvoke> @@ -87,7 +92,10 @@ namespace { const PassInfo *llvm::LowerInvokePassID = X.getPassInfo(); // Public Interface To the LowerInvoke pass. -FunctionPass *llvm::createLowerInvokePass() { return new LowerInvoke(); } +FunctionPass *llvm::createLowerInvokePass(unsigned JumpBufSize, + unsigned JumpBufAlign) { + return new LowerInvoke(JumpBufSize, JumpBufAlign); +} // doInitialization - Make sure that there is a prototype for abort in the // current module. @@ -95,13 +103,8 @@ bool LowerInvoke::doInitialization(Module &M) { const Type *VoidPtrTy = PointerType::get(Type::SByteTy); AbortMessage = 0; if (ExpensiveEHSupport) { - // Insert a type for the linked list of jump buffers. Unfortunately, we - // don't know the size of the target's setjmp buffer, so we make a guess. - // If this guess turns out to be too small, bad stuff could happen. 
- unsigned JmpBufSize = 200; // PPC has 192 words - assert(sizeof(jmp_buf) <= JmpBufSize*sizeof(void*) && - "LowerInvoke doesn't know about targets with jmp_buf size > 200 words!"); - const Type *JmpBufTy = ArrayType::get(VoidPtrTy, JmpBufSize); + // Insert a type for the linked list of jump buffers. + const Type *JmpBufTy = ArrayType::get(VoidPtrTy, JumpBufSize); { // The type is recursive, so use a type holder. std::vector<const Type*> Elements; @@ -441,7 +444,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) { // that needs to be restored on all exits from the function. This is an // alloca because the value needs to be live across invokes. AllocaInst *JmpBuf = - new AllocaInst(JBLinkTy, 0, "jblink", F.begin()->begin()); + new AllocaInst(JBLinkTy, 0, JumpBufAlign, "jblink", F.begin()->begin()); std::vector<Value*> Idx; Idx.push_back(Constant::getNullValue(Type::IntTy)); |