diff options
author | John McCall <rjmccall@apple.com> | 2012-01-29 02:35:02 +0000 |
---|---|---|
committer | John McCall <rjmccall@apple.com> | 2012-01-29 02:35:02 +0000 |
commit | f48f79636d5506d15784c2c2fa8a02086adda40a (patch) | |
tree | 1c1d7f6036c5df44f19fe6702df4dfba0701f276 /lib/CodeGen/CGCall.cpp | |
parent | 4188760f6bb20f91c6883dffd89204419f852dee (diff) |
Get a little bit smarter about killing off the ReturnValue alloca
in the presence of straight-line cleanups. This is a simple but
important case, particularly for ARC.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@149190 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/CodeGen/CGCall.cpp')
-rw-r--r-- | lib/CodeGen/CGCall.cpp | 55 |
1 file changed, 45 insertions(+), 10 deletions(-)
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp index 1de8f0e3f9..940cee3bbd 100644 --- a/lib/CodeGen/CGCall.cpp +++ b/lib/CodeGen/CGCall.cpp @@ -1189,6 +1189,44 @@ static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, return CGF.EmitARCAutoreleaseReturnValue(result); } +/// Heuristically search for a dominating store to the return-value slot. +static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { + // If there are multiple uses of the return-value slot, just check + // for something immediately preceding the IP. Sometimes this can + // happen with how we generate implicit-returns; it can also happen + // with noreturn cleanups. + if (!CGF.ReturnValue->hasOneUse()) { + llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); + if (IP->empty()) return 0; + llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back()); + if (!store) return 0; + if (store->getPointerOperand() != CGF.ReturnValue) return 0; + assert(!store->isAtomic() && !store->isVolatile()); // see below + return store; + } + + llvm::StoreInst *store = + dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back()); + if (!store) return 0; + + // These aren't actually possible for non-coerced returns, and we + // only care about non-coerced returns on this code path. + assert(!store->isAtomic() && !store->isVolatile()); + + // Now do a first-and-dirty dominance check: just walk up the + // single-predecessors chain from the current insertion point. + llvm::BasicBlock *StoreBB = store->getParent(); + llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); + while (IP != StoreBB) { + if (!(IP = IP->getSinglePredecessor())) + return 0; + } + + // Okay, the store's basic block dominates the insertion point; we + // can do our thing. + return store; +} + void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) { // Functions with no result always return void. 
if (ReturnValue == 0) { @@ -1223,16 +1261,9 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) { // The internal return value temp always will have pointer-to-return-type // type, just do a load. - // If the instruction right before the insertion point is a store to the - // return value, we can elide the load, zap the store, and usually zap the - // alloca. - llvm::BasicBlock *InsertBB = Builder.GetInsertBlock(); - llvm::StoreInst *SI = 0; - if (InsertBB->empty() || - !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) || - SI->getPointerOperand() != ReturnValue || SI->isVolatile()) { - RV = Builder.CreateLoad(ReturnValue); - } else { + // If there is a dominating store to ReturnValue, we can elide + // the load, zap the store, and usually zap the alloca. + if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) { // Get the stored value and nuke the now-dead store. RetDbgLoc = SI->getDebugLoc(); RV = SI->getValueOperand(); @@ -1243,6 +1274,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) { cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent(); ReturnValue = 0; } + + // Otherwise, we have to do a simple load. + } else { + RV = Builder.CreateLoad(ReturnValue); } } else { llvm::Value *V = ReturnValue; |