author    Nuno Lopes <nunoplopes@sapo.pt>  2012-07-09 18:38:20 +0000
committer Nuno Lopes <nunoplopes@sapo.pt>  2012-07-09 18:38:20 +0000
commit    78f8ef42173a3a9867ed789073d4ddc652fb7ff2 (patch)
tree      b2c859883f471f5d77bfea9f67a4ca79f61ab44c /lib/Transforms
parent    874b863f2ab9158db6f7f1b773b77e6334e31c41 (diff)
instcombine: merge the functions that remove dead allocas and dead mallocs/callocs/...

This patch removes ~70 lines in InstCombineLoadStoreAlloca.cpp and makes both
functions a bit more aggressive than before :)

In theory, we can be more aggressive when removing an alloca than a malloc,
because an alloca pointer should never escape, but we are not taking advantage
of this anyway.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159952 91177308-0d34-0410-b5e6-96231b3b80d8
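For illustration only (not part of the commit): a minimal C++ sketch, with
hypothetical names, of the kind of allocation site the merged visitAllocSite
handler can delete. Every use of the pointer below is one the new
isAllocSiteRemovable walk accepts: an equality compare against null, a
non-volatile memset targeting the pointer, a GEP-plus-store through it, and a
free call.

  #include <cstdlib>
  #include <cstring>

  // Hypothetical example: assuming malloc is recognized as an alloc-like
  // call (isAllocLikeFn), instcombine can fold the null compare to false,
  // drop the dead writes, and erase the malloc/free pair, leaving an
  // immediate return after CFG cleanup.
  void becomesEmpty() {
    char *p = static_cast<char *>(std::malloc(16)); // alloc-like call
    if (p == nullptr)      // icmp eq with null: folded to false
      return;
    std::memset(p, 0, 16); // non-volatile memset with p as the destination
    p[3] = 42;             // GEP + non-volatile store through p
    std::free(p);          // isFreeCall: removed with the allocation
  }

As the commit message notes, the walk treats allocas and heap allocations
uniformly; it does not (yet) exploit the stronger guarantee that an alloca
pointer never escapes.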
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/InstCombine/InstCombine.h                  |   2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp           |   2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp |  73
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp       | 118
4 files changed, 73 insertions, 122 deletions
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index c2b0e03b40..0d5ef904ee 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -187,7 +187,7 @@ public:
Instruction *visitPHINode(PHINode &PN);
Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
Instruction *visitAllocaInst(AllocaInst &AI);
- Instruction *visitMalloc(Instruction &FI);
+ Instruction *visitAllocSite(Instruction &FI);
Instruction *visitFree(CallInst &FI);
Instruction *visitLoadInst(LoadInst &LI);
Instruction *visitStoreInst(StoreInst &SI);
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index f74cff85c6..c1d9d01c8d 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -880,7 +880,7 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) {
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
if (isAllocLikeFn(CS.getInstruction()))
- return visitMalloc(*CS.getInstruction());
+ return visitAllocSite(*CS.getInstruction());
bool Changed = false;
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index b9df5eb81e..c485844aae 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -22,72 +22,6 @@ using namespace llvm;
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
-// Try to kill dead allocas by walking through its uses until we see some use
-// that could escape. This is a conservative analysis which tries to handle
-// GEPs, bitcasts, stores, and no-op intrinsics. These tend to be the things
-// left after inlining and SROA finish chewing on an alloca.
-static Instruction *removeDeadAlloca(InstCombiner &IC, AllocaInst &AI) {
- SmallVector<Instruction *, 4> Worklist, DeadStores;
- Worklist.push_back(&AI);
- do {
- Instruction *PI = Worklist.pop_back_val();
- for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end();
- UI != UE; ++UI) {
- Instruction *I = cast<Instruction>(*UI);
- switch (I->getOpcode()) {
- default:
- // Give up the moment we see something we can't handle.
- return 0;
-
- case Instruction::GetElementPtr:
- case Instruction::BitCast:
- Worklist.push_back(I);
- continue;
-
- case Instruction::Call:
- // We can handle a limited subset of calls to no-op intrinsics.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
- switch (II->getIntrinsicID()) {
- case Intrinsic::dbg_declare:
- case Intrinsic::dbg_value:
- case Intrinsic::invariant_start:
- case Intrinsic::invariant_end:
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- continue;
- default:
- return 0;
- }
- }
- // Reject everything else.
- return 0;
-
- case Instruction::Store: {
- // Stores into the alloca are only live if the alloca is live.
- StoreInst *SI = cast<StoreInst>(I);
- // We can eliminate atomic stores, but not volatile.
- if (SI->isVolatile())
- return 0;
- // The store is only trivially safe if the pointer is the destination
- // as opposed to the value. We're conservative here and don't check for
- // the case where we store the address of a dead alloca into a dead
- // alloca.
- if (SI->getPointerOperand() != PI)
- return 0;
- DeadStores.push_back(I);
- continue;
- }
- }
- }
- } while (!Worklist.empty());
-
- // The alloca is dead. Kill off all the stores to it, and then replace it
- // with undef.
- while (!DeadStores.empty())
- IC.EraseInstFromFunction(*DeadStores.pop_back_val());
- return IC.ReplaceInstUsesWith(AI, UndefValue::get(AI.getType()));
-}
-
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
@@ -179,10 +113,9 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
}
}
- // Try to aggressively remove allocas which are only used for GEPs, lifetime
- // markers, and stores. This happens when SROA iteratively promotes stores
- // out of the alloca, and we need to cleanup after it.
- return removeDeadAlloca(*this, AI);
+ // At last, use the generic allocation site handler to aggressively remove
+ // unused allocas.
+ return visitAllocSite(AI);
}
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index bcf69183bb..88405c9d4d 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1106,71 +1106,89 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
-static bool IsOnlyNullComparedAndFreed(Value *V, SmallVectorImpl<WeakVH> &Users,
- int Depth = 0) {
- if (Depth == 8)
- return false;
+static bool
+isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users) {
+ SmallVector<Instruction*, 4> Worklist;
+ Worklist.push_back(AI);
- for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- User *U = *UI;
- if (isFreeCall(U)) {
- Users.push_back(U);
- continue;
- }
- if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
- if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) {
- Users.push_back(ICI);
- continue;
- }
- }
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
- if (IsOnlyNullComparedAndFreed(BCI, Users, Depth+1)) {
- Users.push_back(BCI);
+ do {
+ Instruction *PI = Worklist.pop_back_val();
+ for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end(); UI != UE;
+ ++UI) {
+ Instruction *I = cast<Instruction>(*UI);
+ switch (I->getOpcode()) {
+ default:
+ // Give up the moment we see something we can't handle.
+ return false;
+
+ case Instruction::BitCast:
+ case Instruction::GetElementPtr:
+ Users.push_back(I);
+ Worklist.push_back(I);
continue;
- }
- }
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
- if (IsOnlyNullComparedAndFreed(GEPI, Users, Depth+1)) {
- Users.push_back(GEPI);
+
+ case Instruction::ICmp: {
+ ICmpInst *ICI = cast<ICmpInst>(I);
+ // We can fold eq/ne comparisons with null to false/true, respectively.
+ if (!ICI->isEquality() || !isa<ConstantPointerNull>(ICI->getOperand(1)))
+ return false;
+ Users.push_back(I);
continue;
}
- }
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
- switch (II->getIntrinsicID()) {
- default: return false;
- case Intrinsic::memmove:
- case Intrinsic::memcpy:
- case Intrinsic::memset: {
- MemIntrinsic *MI = cast<MemIntrinsic>(II);
- if (MI->isVolatile() || MI->getRawDest() != V)
+
+ case Instruction::Call:
+ // Ignore no-op and store intrinsics.
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default:
+ return false;
+
+ case Intrinsic::memmove:
+ case Intrinsic::memcpy:
+ case Intrinsic::memset: {
+ MemIntrinsic *MI = cast<MemIntrinsic>(II);
+ if (MI->isVolatile() || MI->getRawDest() != PI)
+ return false;
+ }
+ // fall through
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::objectsize:
+ Users.push_back(I);
+ continue;
+ }
+ }
+
+ if (isFreeCall(I)) {
+ Users.push_back(I);
+ continue;
+ }
+ return false;
+
+ case Instruction::Store: {
+ StoreInst *SI = cast<StoreInst>(I);
+ if (SI->isVolatile() || SI->getPointerOperand() != PI)
return false;
- }
- // fall through
- case Intrinsic::objectsize:
- case Intrinsic::lifetime_start:
- case Intrinsic::lifetime_end:
- Users.push_back(II);
+ Users.push_back(I);
continue;
}
+ }
+ llvm_unreachable("missing a return?");
}
- if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
- if (SI->isVolatile() || SI->getPointerOperand() != V)
- return false;
- Users.push_back(SI);
- continue;
- }
- return false;
- }
+ } while (!Worklist.empty());
return true;
}
-Instruction *InstCombiner::visitMalloc(Instruction &MI) {
+Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
// If we have a malloc call which is only used in any amount of comparisons
// to null and free calls, delete the calls and replace the comparisons with
// true or false as appropriate.
SmallVector<WeakVH, 64> Users;
- if (IsOnlyNullComparedAndFreed(&MI, Users)) {
+ if (isAllocSiteRemovable(&MI, Users)) {
for (unsigned i = 0, e = Users.size(); i != e; ++i) {
Instruction *I = cast_or_null<Instruction>(&*Users[i]);
if (!I) continue;