path: root/lib/Transforms
author    Derek Schuff <dschuff@chromium.org>    2012-10-01 11:20:30 -0700
committer Derek Schuff <dschuff@chromium.org>    2012-10-01 11:20:30 -0700
commit    b3423dd295c69f78bd731f1ad65ad90ce3efa36f (patch)
tree      0e872df2f0333ed1806d9e0a6906b2f5ebd58512 /lib/Transforms
parent    a27c28b1427dc2082ab2b31efdbb25f9fde31b61 (diff)
parent    72f0976c1b91c7ba50dce4d0ad0289dc14d37f81 (diff)
Merge commit '72f0976c1b91c7ba50dce4d0ad0289dc14d37f81'
Conflicts:
    lib/Target/ARM/ARMISelDAGToDAG.cpp
    lib/Target/Mips/MipsISelLowering.cpp
    lib/Target/Mips/MipsSubtarget.cpp
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/IPO/ArgumentPromotion.cpp                   |  20
-rw-r--r--  lib/Transforms/IPO/DeadArgumentElimination.cpp             |   6
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp                           |   4
-rw-r--r--  lib/Transforms/IPO/InlineAlways.cpp                        |   4
-rw-r--r--  lib/Transforms/IPO/Inliner.cpp                             |  12
-rw-r--r--  lib/Transforms/IPO/PassManagerBuilder.cpp                  |   7
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp            |   6
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp  |  42
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSelect.cpp           |  24
-rw-r--r--  lib/Transforms/Instrumentation/AddressSanitizer.cpp        |   2
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp                   |   7
-rw-r--r--  lib/Transforms/Scalar/CorrelatedValuePropagation.cpp       |   5
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp             | 126
-rw-r--r--  lib/Transforms/Scalar/LoopUnrollPass.cpp                   |   2
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp                     |   2
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp                             | 161
-rw-r--r--  lib/Transforms/Utils/IntegerDivision.cpp                   | 122
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp                       | 280
-rw-r--r--  lib/Transforms/Utils/ValueMapper.cpp                       |   2
19 files changed, 582 insertions, 252 deletions
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index b94dd69deb..10f5b6e658 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -592,14 +592,6 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
Type *RetTy = FTy->getReturnType();
- // Work around LLVM bug PR56: the CWriter cannot emit varargs functions which
- // have zero fixed arguments.
- bool ExtraArgHack = false;
- if (Params.empty() && FTy->isVarArg()) {
- ExtraArgHack = true;
- Params.push_back(Type::getInt32Ty(F->getContext()));
- }
-
// Construct the new function type using the new arguments.
FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());
@@ -711,9 +703,6 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
}
}
- if (ExtraArgHack)
- Args.push_back(Constant::getNullValue(Type::getInt32Ty(F->getContext())));
-
// Push any varargs arguments on the list.
for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
Args.push_back(*AI);
@@ -870,16 +859,9 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
}
// Increment I2 past all of the arguments added for this promoted pointer.
- for (unsigned i = 0, e = ArgIndices.size(); i != e; ++i)
- ++I2;
+ std::advance(I2, ArgIndices.size());
}
- // Notify the alias analysis implementation that we inserted a new argument.
- if (ExtraArgHack)
- AA.copyValue(Constant::getNullValue(Type::getInt32Ty(F->getContext())),
- NF->arg_begin());
-
-
// Tell the alias analysis that the old function is about to disappear.
AA.replaceWithNewValue(F, NF);
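
Note on the iterator cleanup above: std::advance performs the same N increments as the removed loop but states the intent directly, and it works for any forward iterator. A minimal, self-contained sketch (std::list stands in for the argument list, which is likewise not random-access):

#include <iterator>
#include <list>

int main() {
  std::list<int> Args = {1, 2, 3, 4, 5};
  std::list<int>::iterator I2 = Args.begin();
  // Same effect as the removed "for (unsigned i = 0, e = ...; i != e; ++i) ++I2;"
  std::advance(I2, 3);
  return *I2 == 4 ? 0 : 1;
}
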
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index fd23a935b9..c7429c5954 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -717,9 +717,9 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// here. Currently, this should not be possible, but special handling might be
// required when new return value attributes are added.
if (NRetTy->isVoidTy())
- RAttrs &= ~Attribute::typeIncompatible(NRetTy);
+ RAttrs &= ~Attributes::typeIncompatible(NRetTy);
else
- assert((RAttrs & Attribute::typeIncompatible(NRetTy)) == 0
+ assert((RAttrs & Attributes::typeIncompatible(NRetTy)) == 0
&& "Return attributes no longer compatible?");
if (RAttrs)
@@ -786,7 +786,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
Attributes RAttrs = CallPAL.getRetAttributes();
Attributes FnAttrs = CallPAL.getFnAttributes();
// Adjust in case the function was changed to return void.
- RAttrs &= ~Attribute::typeIncompatible(NF->getReturnType());
+ RAttrs &= ~Attributes::typeIncompatible(NF->getReturnType());
if (RAttrs)
AttributesVec.push_back(AttributeWithIndex::get(0, RAttrs));
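
The attribute changes in this file recur throughout the commit (InstCombineCalls, InlineAlways, Inliner, AddressSanitizer, CodeGenPrepare, LoopUnrollPass, LoopUnswitch). A fragment-only sketch of the two mechanical rewrites, assuming the 2012-era llvm::Function and llvm::Attributes APIs:

// Before: a free function in the Attribute namespace, and enum-based
// queries through Function::hasFnAttr().
RAttrs &= ~Attribute::typeIncompatible(RetTy);
bool Hint = F->hasFnAttr(Attribute::InlineHint);

// After: typeIncompatible is hosted by the Attributes class, and each
// attribute gets a predicate method on the object getFnAttributes() returns.
RAttrs &= ~Attributes::typeIncompatible(RetTy);
bool Hint = F->getFnAttributes().hasInlineHintAttr();
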
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index b888e95982..b1ba6be5ff 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -962,7 +962,9 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
// If we get here we could have other crazy uses that are transitively
// loaded.
assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
- isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser)) &&
+ isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
+ isa<BitCastInst>(GlobalUser) ||
+ isa<GetElementPtrInst>(GlobalUser)) &&
"Only expect load and stores!");
}
}
diff --git a/lib/Transforms/IPO/InlineAlways.cpp b/lib/Transforms/IPO/InlineAlways.cpp
index 664ddf6f7a..42f0991360 100644
--- a/lib/Transforms/IPO/InlineAlways.cpp
+++ b/lib/Transforms/IPO/InlineAlways.cpp
@@ -65,7 +65,7 @@ Pass *llvm::createAlwaysInlinerPass(bool InsertLifetime) {
/// \brief Minimal filter to detect invalid constructs for inlining.
static bool isInlineViable(Function &F) {
- bool ReturnsTwice = F.hasFnAttr(Attribute::ReturnsTwice);
+ bool ReturnsTwice = F.getFnAttributes().hasReturnsTwiceAttr();
for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
// Disallow inlining of functions which contain an indirect branch.
if (isa<IndirectBrInst>(BI->getTerminator()))
@@ -114,7 +114,7 @@ InlineCost AlwaysInliner::getInlineCost(CallSite CS) {
if (Callee->isDeclaration()) return InlineCost::getNever();
// Return never for anything not marked as always inline.
- if (!Callee->hasFnAttr(Attribute::AlwaysInline))
+ if (!Callee->getFnAttributes().hasAlwaysInlineAttr())
return InlineCost::getNever();
// Do some minimal analysis to preclude non-viable functions.
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index a9263baa44..7932b40bdc 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -93,10 +93,10 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// If the inlined function had a higher stack protection level than the
// calling function, then bump up the caller's stack protection level.
- if (Callee->hasFnAttr(Attribute::StackProtectReq))
+ if (Callee->getFnAttributes().hasStackProtectReqAttr())
Caller->addFnAttr(Attribute::StackProtectReq);
- else if (Callee->hasFnAttr(Attribute::StackProtect) &&
- !Caller->hasFnAttr(Attribute::StackProtectReq))
+ else if (Callee->getFnAttributes().hasStackProtectAttr() &&
+ !Caller->getFnAttributes().hasStackProtectReqAttr())
Caller->addFnAttr(Attribute::StackProtect);
// Look at all of the allocas that we inlined through this call site. If we
@@ -209,7 +209,7 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
// would decrease the threshold.
Function *Caller = CS.getCaller();
bool OptSize = Caller && !Caller->isDeclaration() &&
- Caller->hasFnAttr(Attribute::OptimizeForSize);
+ Caller->getFnAttributes().hasOptimizeForSizeAttr();
if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
OptSizeThreshold < thres)
thres = OptSizeThreshold;
@@ -217,7 +217,7 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
// Listen to the inlinehint attribute when it would increase the threshold.
Function *Callee = CS.getCalledFunction();
bool InlineHint = Callee && !Callee->isDeclaration() &&
- Callee->hasFnAttr(Attribute::InlineHint);
+ Callee->getFnAttributes().hasInlineHintAttr();
if (InlineHint && HintThreshold > thres)
thres = HintThreshold;
@@ -533,7 +533,7 @@ bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
// Handle the case when this function is called and we only want to care
// about always-inline functions. This is a bit of a hack to share code
// between here and the InlineAlways pass.
- if (AlwaysInlineOnly && !F->hasFnAttr(Attribute::AlwaysInline))
+ if (AlwaysInlineOnly && !F->getFnAttributes().hasAlwaysInlineAttr())
continue;
// If the only remaining users of the function are dead constants, remove
diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp
index c81b333813..9e328b9ac9 100644
--- a/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -211,13 +211,12 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
// FIXME: We shouldn't bother with this anymore.
MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes
- // GlobalOpt already deletes dead functions and globals, at -O3 try a
+ // GlobalOpt already deletes dead functions and globals, at -O2 try a
// late pass of GlobalDCE. It is capable of deleting dead cycles.
- if (OptLevel > 2)
+ if (OptLevel > 1) {
MPM.add(createGlobalDCEPass()); // Remove dead fns and globals.
-
- if (OptLevel > 1)
MPM.add(createConstantMergePass()); // Merge dup global constants
+ }
}
addExtensionsToPM(EP_OptimizerLast, MPM);
}
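
The "dead cycles" the comment mentions are mutually referencing globals that never become use-empty. A hedged C++ illustration of the shape only a reachability-based pass like GlobalDCE can delete:

// With no external callers, both functions are dead, but each keeps the
// other's use list non-empty, so use-count-driven deletion never fires;
// GlobalDCE's mark-and-sweep from the module roots removes both.
static void ping();
static void pong() { ping(); }
static void ping() { pong(); }
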
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 23c08699ff..ac30dcdcbf 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1037,7 +1037,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
Attributes RAttrs = CallerPAL.getRetAttributes();
- if (RAttrs & Attribute::typeIncompatible(NewRetTy))
+ if (RAttrs & Attributes::typeIncompatible(NewRetTy))
return false; // Attribute not compatible with transformed value.
}
@@ -1067,7 +1067,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
return false; // Cannot transform this parameter value.
Attributes Attrs = CallerPAL.getParamAttributes(i + 1);
- if (Attrs & Attribute::typeIncompatible(ParamTy))
+ if (Attrs & Attributes::typeIncompatible(ParamTy))
return false; // Attribute not compatible with transformed value.
// If the parameter is passed as a byval argument, then we have to have a
@@ -1141,7 +1141,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// If the return value is not being used, the type may not be compatible
// with the existing attributes. Wipe out any problematic attributes.
- RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
+ RAttrs &= ~Attributes::typeIncompatible(NewRetTy);
// Add the new return attributes.
if (RAttrs)
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 5b6cf4a4a8..a446e427e5 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -264,26 +264,28 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
}
}
- // Check to see if this allocation is only modified by a memcpy/memmove from
- // a constant global whose alignment is equal to or exceeds that of the
- // allocation. If this is the case, we can change all users to use
- // the constant global instead. This is commonly produced by the CFE by
- // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
- // is only subsequently read.
- SmallVector<Instruction *, 4> ToDelete;
- if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
- if (AI.getAlignment() <= getPointeeAlignment(Copy->getSource(), *TD)) {
- DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
- DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
- for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
- EraseInstFromFunction(*ToDelete[i]);
- Constant *TheSrc = cast<Constant>(Copy->getSource());
- Instruction *NewI
- = ReplaceInstUsesWith(AI, ConstantExpr::getBitCast(TheSrc,
- AI.getType()));
- EraseInstFromFunction(*Copy);
- ++NumGlobalCopies;
- return NewI;
+ if (TD) {
+ // Check to see if this allocation is only modified by a memcpy/memmove from
+ // a constant global whose alignment is equal to or exceeds that of the
+ // allocation. If this is the case, we can change all users to use
+ // the constant global instead. This is commonly produced by the CFE by
+ // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
+ // is only subsequently read.
+ SmallVector<Instruction *, 4> ToDelete;
+ if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
+ if (AI.getAlignment() <= getPointeeAlignment(Copy->getSource(), *TD)) {
+ DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
+ DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
+ for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
+ EraseInstFromFunction(*ToDelete[i]);
+ Constant *TheSrc = cast<Constant>(Copy->getSource());
+ Instruction *NewI
+ = ReplaceInstUsesWith(AI, ConstantExpr::getBitCast(TheSrc,
+ AI.getType()));
+ EraseInstFromFunction(*Copy);
+ ++NumGlobalCopies;
+ return NewI;
+ }
}
}
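
For context, a sketch of the front-end construct the comment cites (not code from the patch):

// Clang emits the initializer as a memcpy from a private constant global.
// When A is only read afterwards, the transform above forwards users to
// that global and deletes the alloca; the new TD guard simply skips the
// transform when no TargetData (and hence no getPointeeAlignment) is
// available.
void foo() {
  int A[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  // ... only reads of A ...
}
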
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 291e80019e..0ba7340e64 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -903,7 +903,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return &SI;
}
- if (VectorType* VecTy = dyn_cast<VectorType>(SI.getType())) {
+ if (VectorType *VecTy = dyn_cast<VectorType>(SI.getType())) {
unsigned VWidth = VecTy->getNumElements();
APInt UndefElts(VWidth, 0);
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
@@ -912,6 +912,28 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return ReplaceInstUsesWith(SI, V);
return &SI;
}
+
+ if (ConstantVector *CV = dyn_cast<ConstantVector>(CondVal)) {
+ // Form a shufflevector instruction.
+ SmallVector<Constant *, 8> Mask(VWidth);
+ Type *Int32Ty = Type::getInt32Ty(CV->getContext());
+ for (unsigned i = 0; i != VWidth; ++i) {
+ Constant *Elem = cast<Constant>(CV->getOperand(i));
+ if (ConstantInt *E = dyn_cast<ConstantInt>(Elem))
+ Mask[i] = ConstantInt::get(Int32Ty, i + (E->isZero() ? VWidth : 0));
+ else if (isa<UndefValue>(Elem))
+ Mask[i] = UndefValue::get(Int32Ty);
+ else
+ return 0;
+ }
+ Constant *MaskVal = ConstantVector::get(Mask);
+ Value *V = Builder->CreateShuffleVector(TrueVal, FalseVal, MaskVal);
+ return ReplaceInstUsesWith(SI, V);
+ }
+
+ if (isa<ConstantAggregateZero>(CondVal)) {
+ return ReplaceInstUsesWith(SI, FalseVal);
+ }
}
return 0;
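
The shuffle mask built above encodes the select lanes numerically; shufflevector numbers the lanes of its two operands consecutively, TrueVal first. A standalone C++ sketch of the index arithmetic:

#include <cstdio>

int main() {
  const bool Cond[] = {true, false, true, false}; // the <4 x i1> condition
  const unsigned VWidth = 4;
  for (unsigned i = 0; i != VWidth; ++i)
    // Mirrors Mask[i] = i + (E->isZero() ? VWidth : 0) from the patch.
    std::printf("Mask[%u] = %u\n", i, Cond[i] ? i : i + VWidth);
  // Prints 0, 5, 2, 7: lanes 0 and 2 come from TrueVal, lanes 1 and 3 from
  // FalseVal. An all-zero condition is the degenerate case handled by the
  // ConstantAggregateZero branch: the result is just FalseVal.
  return 0;
}
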
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index afa6a4b5e6..1b102bd243 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -854,7 +854,7 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) {
// If needed, insert __asan_init before checking for AddressSafety attr.
maybeInsertAsanInitAtFunctionEntry(F);
- if (!F.hasFnAttr(Attribute::AddressSafety)) return false;
+ if (!F.getFnAttributes().hasAddressSafetyAttr()) return false;
if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
return false;
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 495cdc6321..305d70f27b 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -149,7 +149,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
TLInfo = &getAnalysis<TargetLibraryInfo>();
DT = getAnalysisIfAvailable<DominatorTree>();
PFI = getAnalysisIfAvailable<ProfileInfo>();
- OptSize = F.hasFnAttr(Attribute::OptimizeForSize);
+ OptSize = F.getFnAttributes().hasOptimizeForSizeAttr();
/// This optimization identifies DIV instructions that can be
/// profitably bypassed and carried out with a shorter, faster divide.
@@ -226,7 +226,8 @@ bool CodeGenPrepare::EliminateFallThrough(Function &F) {
// edge, just collapse it.
BasicBlock *SinglePred = BB->getSinglePredecessor();
- if (!SinglePred || SinglePred == BB) continue;
+ // Don't merge if BB's address is taken.
+ if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
if (Term && !Term->isConditional()) {
@@ -788,7 +789,7 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
}
// If we eliminated all predecessors of the block, delete the block now.
- if (Changed && pred_begin(BB) == pred_end(BB))
+ if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
BB->eraseFromParent();
return Changed;
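
Both new hasAddressTaken() guards protect blocks named by a blockaddress constant. One source construct that produces such blocks is GCC's labels-as-values extension (hedged example; compiles with GCC and Clang):

// Taking &&L0 / &&L1 creates blockaddress() constants in the IR; merging
// or erasing the labeled blocks would leave those constants dangling.
void dispatch(int i) {
  static void *Targets[] = {&&L0, &&L1};
  goto *Targets[i & 1];
L0:
  return;
L1:
  return;
}
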
diff --git a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 9b0aadb0b5..3ec6f3dcc3 100644
--- a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -235,6 +235,11 @@ bool CorrelatedValuePropagation::processSwitch(SwitchInst *SI) {
// This case never fires - remove it.
CI.getCaseSuccessor()->removePredecessor(BB);
SI->removeCase(CI); // Does not invalidate the iterator.
+
+ // The condition can be modified by removePredecessor's PHI simplification
+ // logic.
+ Cond = SI->getCondition();
+
++NumDeadCases;
Changed = true;
} else if (State == LazyValueInfo::True) {
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 1ff4329c84..301ee2f663 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -30,6 +30,7 @@
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SetVector.h"
@@ -45,6 +46,7 @@ namespace {
AliasAnalysis *AA;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
+ const TargetLibraryInfo *TLI;
static char ID; // Pass identification, replacement for typeid
DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
@@ -55,6 +57,7 @@ namespace {
AA = &getAnalysis<AliasAnalysis>();
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
+ TLI = AA->getTargetLibraryInfo();
bool Changed = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
@@ -144,7 +147,7 @@ static void DeleteDeadInstruction(Instruction *I,
/// hasMemoryWrite - Does this instruction write some memory? This only returns
/// true for things that we can analyze with other helpers below.
-static bool hasMemoryWrite(Instruction *I) {
+static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) {
if (isa<StoreInst>(I))
return true;
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
@@ -159,6 +162,26 @@ static bool hasMemoryWrite(Instruction *I) {
return true;
}
}
+ if (CallSite CS = I) {
+ if (Function *F = CS.getCalledFunction()) {
+ if (TLI && TLI->has(LibFunc::strcpy) &&
+ F->getName() == TLI->getName(LibFunc::strcpy)) {
+ return true;
+ }
+ if (TLI && TLI->has(LibFunc::strncpy) &&
+ F->getName() == TLI->getName(LibFunc::strncpy)) {
+ return true;
+ }
+ if (TLI && TLI->has(LibFunc::strcat) &&
+ F->getName() == TLI->getName(LibFunc::strcat)) {
+ return true;
+ }
+ if (TLI && TLI->has(LibFunc::strncat) &&
+ F->getName() == TLI->getName(LibFunc::strncat)) {
+ return true;
+ }
+ }
+ }
return false;
}
@@ -206,7 +229,8 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
/// instruction if any.
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
- assert(hasMemoryWrite(Inst) && "Unknown instruction case");
+ assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) &&
+ "Unknown instruction case");
// The only instructions that both read and write are the mem transfer
// instructions (memcpy/memmove).
@@ -223,23 +247,29 @@ static bool isRemovable(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->isUnordered();
- IntrinsicInst *II = cast<IntrinsicInst>(I);
- switch (II->getIntrinsicID()) {
- default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
- case Intrinsic::lifetime_end:
- // Never remove dead lifetime_end's, e.g. because it is followed by a
- // free.
- return false;
- case Intrinsic::init_trampoline:
- // Always safe to remove init_trampoline.
- return true;
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate");
+ case Intrinsic::lifetime_end:
+ // Never remove dead lifetime_end's, e.g. because it is followed by a
+ // free.
+ return false;
+ case Intrinsic::init_trampoline:
+ // Always safe to remove init_trampoline.
+ return true;
- case Intrinsic::memset:
- case Intrinsic::memmove:
- case Intrinsic::memcpy:
- // Don't remove volatile memory intrinsics.
- return !cast<MemIntrinsic>(II)->isVolatile();
+ case Intrinsic::memset:
+ case Intrinsic::memmove:
+ case Intrinsic::memcpy:
+ // Don't remove volatile memory intrinsics.
+ return !cast<MemIntrinsic>(II)->isVolatile();
+ }
}
+
+ if (CallSite CS = I)
+ return CS.getInstruction()->use_empty();
+
+ return false;
}
@@ -250,14 +280,19 @@ static bool isShortenable(Instruction *I) {
if (isa<StoreInst>(I))
return false;
- IntrinsicInst *II = cast<IntrinsicInst>(I);
- switch (II->getIntrinsicID()) {
- default: return false;
- case Intrinsic::memset:
- case Intrinsic::memcpy:
- // Do shorten memory intrinsics.
- return true;
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default: return false;
+ case Intrinsic::memset:
+ case Intrinsic::memcpy:
+ // Do shorten memory intrinsics.
+ return true;
+ }
}
+
+ // Don't shorten libcall calls for now.
+
+ return false;
}
/// getStoredPointerOperand - Return the pointer that is being written to.
@@ -267,12 +302,18 @@ static Value *getStoredPointerOperand(Instruction *I) {
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
return MI->getDest();
- IntrinsicInst *II = cast<IntrinsicInst>(I);
- switch (II->getIntrinsicID()) {
- default: llvm_unreachable("Unexpected intrinsic!");
- case Intrinsic::init_trampoline:
- return II->getArgOperand(0);
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ switch (II->getIntrinsicID()) {
+ default: llvm_unreachable("Unexpected intrinsic!");
+ case Intrinsic::init_trampoline:
+ return II->getArgOperand(0);
+ }
}
+
+ CallSite CS = I;
+ // All the supported functions so far happen to have dest as their first
+ // argument.
+ return CS.getArgument(0);
}
static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
@@ -455,13 +496,13 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
Instruction *Inst = BBI++;
// Handle 'free' calls specially.
- if (CallInst *F = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
+ if (CallInst *F = isFreeCall(Inst, TLI)) {
MadeChange |= HandleFree(F);
continue;
}
// If we find something that writes memory, get its memory dependence.
- if (!hasMemoryWrite(Inst))
+ if (!hasMemoryWrite(Inst, TLI))
continue;
MemDepResult InstDep = MD->getDependency(Inst);
@@ -484,7 +525,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
// in case we need it.
WeakVH NextInst(BBI);
- DeleteDeadInstruction(SI, *MD, AA->getTargetLibraryInfo());
+ DeleteDeadInstruction(SI, *MD, TLI);
if (NextInst == 0) // Next instruction deleted.
BBI = BB.begin();
@@ -531,7 +572,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
<< *DepWrite << "\n KILLER: " << *Inst << '\n');
// Delete the store and now-dead instructions that feed it.
- DeleteDeadInstruction(DepWrite, *MD, AA->getTargetLibraryInfo());
+ DeleteDeadInstruction(DepWrite, *MD, TLI);
++NumFastStores;
MadeChange = true;
@@ -628,7 +669,7 @@ bool DSE::HandleFree(CallInst *F) {
MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB);
while (Dep.isDef() || Dep.isClobber()) {
Instruction *Dependency = Dep.getInst();
- if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
+ if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency))
break;
Value *DepPointer =
@@ -641,7 +682,7 @@ bool DSE::HandleFree(CallInst *F) {
Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
// DCE instructions only used to calculate that store
- DeleteDeadInstruction(Dependency, *MD, AA->getTargetLibraryInfo());
+ DeleteDeadInstruction(Dependency, *MD, TLI);
++NumFastStores;
MadeChange = true;
@@ -681,8 +722,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Okay, so these are dead heap objects, but if the pointer never escapes
// then it's leaked by this function anyways.
- else if (isAllocLikeFn(I, AA->getTargetLibraryInfo()) &&
- !PointerMayBeCaptured(I, true, true))
+ else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true))
DeadStackObjects.insert(I);
}
@@ -698,7 +738,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
--BBI;
// If we find a store, check to see if it points into a dead stack value.
- if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
+ if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) {
// See through pointer-to-pointer bitcasts
SmallVector<Value *, 4> Pointers;
GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers);
@@ -726,8 +766,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
dbgs() << '\n');
// DCE instructions only used to calculate that store.
- DeleteDeadInstruction(Dead, *MD, AA->getTargetLibraryInfo(),
- &DeadStackObjects);
+ DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects);
++NumFastStores;
MadeChange = true;
continue;
@@ -735,10 +774,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
}
// Remove any dead non-memory-mutating instructions.
- if (isInstructionTriviallyDead(BBI, AA->getTargetLibraryInfo())) {
+ if (isInstructionTriviallyDead(BBI, TLI)) {
Instruction *Inst = BBI++;
- DeleteDeadInstruction(Inst, *MD, AA->getTargetLibraryInfo(),
- &DeadStackObjects);
+ DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects);
++NumFastOther;
MadeChange = true;
continue;
@@ -754,7 +792,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (CallSite CS = cast<Value>(BBI)) {
// Remove allocation function calls from the list of dead stack objects;
// there can't be any references before the definition.
- if (isAllocLikeFn(BBI, AA->getTargetLibraryInfo()))
+ if (isAllocLikeFn(BBI, TLI))
DeadStackObjects.remove(BBI);
// If this call does not access memory, it can't be loading any of our
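
On the isRemovable change above: the recognized string routines return their destination pointer, so a call is only a pure (deletable) store when that result is unused — hence the use_empty() test. A minimal sketch of a strcpy made dead by a later overwrite:

#include <cstring>

void example(char *Out) {
  char Buf[16];
  std::strcpy(Buf, "dead"); // result unused and fully overwritten below:
                            // removable by DSE as a dead "store"
  std::strcpy(Buf, "live"); // the killing write
  std::strcpy(Out, Buf);
}
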
diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 09a186f7f9..f8709a537f 100644
--- a/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -145,7 +145,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
// not user specified.
unsigned Threshold = CurrentThreshold;
if (!UserThreshold &&
- Header->getParent()->hasFnAttr(Attribute::OptimizeForSize))
+ Header->getParent()->getFnAttributes().hasOptimizeForSizeAttr())
Threshold = OptSizeUnrollThreshold;
// Find trip count and trip multiple if count is not available
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 58f7739888..74c8f43ec2 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -638,7 +638,7 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
// Check to see if it would be profitable to unswitch current loop.
// Do not do non-trivial unswitch while optimizing for size.
- if (OptimizeForSize || F->hasFnAttr(Attribute::OptimizeForSize))
+ if (OptimizeForSize || F->getFnAttributes().hasOptimizeForSizeAttr())
return false;
UnswitchNontrivialCondition(LoopCond, Val, currentLoop);
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index e3182d319c..a8dc0533bf 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -202,11 +202,11 @@ public:
use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); }
use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); }
use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); }
- void use_insert(unsigned Idx, use_iterator UI, const PartitionUse &U) {
- Uses[Idx].insert(UI, U);
+ void use_push_back(unsigned Idx, const PartitionUse &U) {
+ Uses[Idx].push_back(U);
}
- void use_insert(const_iterator I, use_iterator UI, const PartitionUse &U) {
- Uses[I - begin()].insert(UI, U);
+ void use_push_back(const_iterator I, const PartitionUse &U) {
+ Uses[I - begin()].push_back(U);
}
void use_erase(unsigned Idx, use_iterator UI) { Uses[Idx].erase(UI); }
void use_erase(const_iterator I, use_iterator UI) {
@@ -522,8 +522,10 @@ private:
void insertUse(Instruction &I, int64_t Offset, uint64_t Size,
bool IsSplittable = false) {
- // Completely skip uses which don't overlap the allocation.
- if ((Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
+ // Completely skip uses which have a zero size or don't overlap the
+ // allocation.
+ if (Size == 0 ||
+ (Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
(Offset < 0 && (uint64_t)-Offset >= Size)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
<< " which starts past the end of the " << AllocSize
@@ -660,11 +662,14 @@ private:
bool Inserted = false;
llvm::tie(PMI, Inserted)
= MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx));
- if (!Inserted && Offsets.IsSplittable) {
+ if (Offsets.IsSplittable &&