author     Chandler Carruth <chandlerc@gmail.com>   2012-12-10 08:28:39 +0000
committer  Chandler Carruth <chandlerc@gmail.com>   2012-12-10 08:28:39 +0000
commit     ed90ed077a58d6eff6aede206273ae0aeefa3a94
tree       d6204735bcf5ce7bc0c16b680b56354245e02e17
parent     48b509c77318cc6e753513ca5dcdd67e18094235
Add a new visitor for walking the uses of a pointer value.
This visitor provides infrastructure for recursively traversing the
use-graph of a pointer-producing instruction like an alloca or a malloc.
It maintains a worklist of uses to visit, so it can handle very deep
recursions. It automatically looks through instructions which simply
translate one pointer to another (bitcasts and GEPs). It tracks the
offset relative to the original pointer as long as that offset remains
constant and exposes it during the visit as an APInt offset. Finally, it
performs conservative escape analysis.

However, currently it has some limitations that should be addressed
going forward:
1) It doesn't handle vectors of pointers.
2) It doesn't provide a cheaper visitor when the constant offset
   tracking isn't needed.
3) It doesn't support non-instruction pointer values.

The current functionality is exactly what is required to implement the
SROA pointer-use visitors in terms of this one, rather than in terms of
their own ad-hoc base visitor, which was always very poorly specified.
SROA has been converted to use this, and the code there deleted which
this utility now provides.

Technically speaking, using this new visitor allows SROA to handle a few
more cases than it previously did. It is now more aggressive in ignoring
chains of instructions which look like they would defeat SROA, but in
fact do not because they never result in a read or write of memory.
While this is "neat", it shouldn't be interesting for real programs as
any such chains should have been removed by other passes long before we
get to SROA. As a consequence, I've not added any tests for these
features -- it shouldn't be part of SROA's contract to perform such
heroics.

The goal is to extend the functionality of this visitor going forward,
and re-use it from passes like ASan that can benefit from doing a
detailed walk of the uses of a pointer.

Thanks to Ben Kramer for the code review rounds and lots of help
reviewing and debugging this patch.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@169728 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  include/llvm/Analysis/PtrUseVisitor.h    286
-rw-r--r--  lib/Analysis/CMakeLists.txt                1
-rw-r--r--  lib/Analysis/PtrUseVisitor.cpp            58
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp           378
4 files changed, 504 insertions, 219 deletions
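Before the diff itself, a minimal sketch of how a client might use the new interface; this example is not part of the patch, and TrivialPtrVisitor and allocaDoesNotEscape are hypothetical names. A pass instantiates a visitor over an alloca and queries the PtrInfo that visitPtr returns:

    #include "llvm/Analysis/PtrUseVisitor.h"
    #include "llvm/DataLayout.h"
    #include "llvm/Instructions.h"

    // Hypothetical visitor adding no overrides of its own: the defaults in
    // PtrUseVisitor already flag escapes via stores of the pointer value,
    // ptrtoint casts, and call or invoke arguments.
    struct TrivialPtrVisitor : llvm::PtrUseVisitor<TrivialPtrVisitor> {
      TrivialPtrVisitor(const llvm::DataLayout &DL)
          : llvm::PtrUseVisitor<TrivialPtrVisitor>(DL) {}
    };

    // Hypothetical driver: returns true if the alloca's pointer never escapes
    // and the walk completed without aborting early.
    static bool allocaDoesNotEscape(llvm::AllocaInst &AI,
                                    const llvm::DataLayout &DL) {
      TrivialPtrVisitor V(DL);
      TrivialPtrVisitor::PtrInfo PtrI = V.visitPtr(AI);
      return !PtrI.isEscaped() && !PtrI.isAborted();
    }

Because the base class already treats stores of the pointer value, ptrtoint casts, and call/invoke arguments as escapes, even this trivial subclass performs the conservative escape analysis the commit message describes.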
diff --git a/include/llvm/Analysis/PtrUseVisitor.h b/include/llvm/Analysis/PtrUseVisitor.h
new file mode 100644
index 0000000000..e15f2b45a8
--- /dev/null
+++ b/include/llvm/Analysis/PtrUseVisitor.h
@@ -0,0 +1,286 @@
+//===- PtrUseVisitor.h - InstVisitors over a pointer's uses -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides a collection of visitors which walk the (instruction)
+/// uses of a pointer. These visitors all provide the same essential behavior
+/// as an InstVisitor with similar template-based flexibility and
+/// implementation strategies.
+///
+/// These can be used, for example, to quickly analyze the uses of an alloca,
+/// global variable, or function argument.
+///
+/// FIXME: Provide a variant which doesn't track offsets and is cheaper.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PTRUSEVISITOR_H
+#define LLVM_ANALYSIS_PTRUSEVISITOR_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DataLayout.h"
+#include "llvm/InstVisitor.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/GetElementPtrTypeIterator.h"
+
+namespace llvm {
+
+namespace detail {
+/// \brief Implementation of non-dependent functionality for \c PtrUseVisitor.
+///
+/// See \c PtrUseVisitor for the public interface and detailed comments about
+/// usage. This class is just a helper base class which is not templated and
+/// contains all common code to be shared between different instantiations of
+/// PtrUseVisitor.
+class PtrUseVisitorBase {
+public:
+ /// \brief This class provides information about the result of a visit.
+ ///
+ /// After walking all the users (recursively) of a pointer, the basic
+ /// infrastructure records some commonly useful information such as escape
+ /// analysis and whether the visit completed or aborted early.
+ class PtrInfo {
+ public:
+ PtrInfo() : AbortedInfo(0, false), EscapedInfo(0, false) {}
+
+ /// \brief Reset the pointer info, clearing all state.
+ void reset() {
+ AbortedInfo.setPointer(0);
+ AbortedInfo.setInt(false);
+ EscapedInfo.setPointer(0);
+ EscapedInfo.setInt(false);
+ }
+
+ /// \brief Did we abort the visit early?
+ bool isAborted() const { return AbortedInfo.getInt(); }
+
+ /// \brief Is the pointer escaped at some point?
+ bool isEscaped() const { return EscapedInfo.getInt(); }
+
+ /// \brief Get the instruction causing the visit to abort.
+ /// \returns a pointer to the instruction causing the abort if one is
+ /// available; otherwise returns null.
+ Instruction *getAbortingInst() const { return AbortedInfo.getPointer(); }
+
+ /// \brief Get the instruction causing the pointer to escape.
+ /// \returns a pointer to the instruction which escapes the pointer if one
+ /// is available; otherwise returns null.
+ Instruction *getEscapingInst() const { return EscapedInfo.getPointer(); }
+
+ /// \brief Mark the visit as aborted. Intended for use in a void return.
+ /// \param I The instruction which caused the visit to abort, if available.
+ void setAborted(Instruction *I = 0) {
+ AbortedInfo.setInt(true);
+ AbortedInfo.setPointer(I);
+ }
+
+ /// \brief Mark the pointer as escaped. Intended for use in a void return.
+ /// \param I The instruction which escapes the pointer, if available.
+ void setEscaped(Instruction *I = 0) {
+ EscapedInfo.setInt(true);
+ EscapedInfo.setPointer(I);
+ }
+
+ /// \brief Mark the pointer as escaped, and the visit as aborted. Intended
+ /// for use in a void return.
+ /// \param I The instruction which both escapes the pointer and aborts the
+ /// visit, if available.
+ void setEscapedAndAborted(Instruction *I = 0) {
+ setEscaped(I);
+ setAborted(I);
+ }
+
+ private:
+ PointerIntPair<Instruction *, 1, bool> AbortedInfo, EscapedInfo;
+ };
+
+protected:
+ const DataLayout &DL;
+
+ /// \name Visitation infrastructure
+ /// @{
+
+ /// \brief The info collected about the pointer being visited thus far.
+ PtrInfo PI;
+
+ /// \brief A struct of the data needed to visit a particular use.
+ ///
+  /// This is used to maintain a worklist of to-visit uses. This makes the
+  /// visit iterative rather than recursive.
+ struct UseToVisit {
+ typedef PointerIntPair<Use *, 1, bool> UseAndIsOffsetKnownPair;
+ UseAndIsOffsetKnownPair UseAndIsOffsetKnown;
+ APInt Offset;
+ };
+
+ /// \brief The worklist of to-visit uses.
+ SmallVector<UseToVisit, 8> Worklist;
+
+ /// \brief A set of visited uses to break cycles in unreachable code.
+ SmallPtrSet<Use *, 8> VisitedUses;
+
+ /// @}
+
+
+ /// \name Per-visit state
+ /// This state is reset for each instruction visited.
+ /// @{
+
+ /// \brief The use currently being visited.
+ Use *U;
+
+ /// \brief True if we have a known constant offset for the use currently
+ /// being visited.
+ bool IsOffsetKnown;
+
+ /// \brief The constant offset of the use if that is known.
+ APInt Offset;
+
+ /// @}
+
+
+  /// Note that the constructor is protected because this class must be used
+  /// as a base class; instances of it cannot be created directly.
+ PtrUseVisitorBase(const DataLayout &DL) : DL(DL) {}
+
+ /// \brief Enqueue the users of this instruction in the visit worklist.
+ ///
+  /// This will visit the users with the same offset as the current visit
+ /// (including an unknown offset if that is the current state).
+ void enqueueUsers(Instruction &I);
+
+ /// \brief Walk the operands of a GEP and adjust the offset as appropriate.
+ ///
+ /// This routine does the heavy lifting of the pointer walk by computing
+ /// offsets and looking through GEPs.
+ bool adjustOffsetForGEP(GetElementPtrInst &GEPI);
+};
+} // end namespace detail
+
+/// \brief A base class for visitors over the uses of a pointer value.
+///
+/// Once constructed, a user can call \c visit on a pointer value, and this
+/// will walk its uses and visit each instruction using an InstVisitor. It also
+/// provides visit methods which will recurse through any pointer-to-pointer
+/// transformations such as GEPs and bitcasts.
+///
+/// During the visit, the current Use* being visited is available to the
+/// subclass, as well as the current offset from the original base pointer if
+/// known.
+///
+/// The recursive visit of uses is accomplished with a worklist, so the only
+/// ordering guarantee is that an instruction is visited before any uses of it
+/// are visited. Note that this does *not* mean before any of its users are
+/// visited! This is because users can be visited multiple times due to
+/// multiple, different uses of pointers derived from the same base.
+///
+/// A particular Use will only be visited once, but a User may be visited
+/// multiple times, once per Use. These visits may notably have different
+/// offsets.
+///
+/// All visit methods on the underlying InstVisitor return a boolean. This
+/// return short-circuits the visit, stopping it immediately.
+///
+/// FIXME: Generalize this for all values rather than just instructions.
+template <typename DerivedT>
+class PtrUseVisitor : protected InstVisitor<DerivedT>,
+ public detail::PtrUseVisitorBase {
+ friend class InstVisitor<DerivedT>;
+ typedef InstVisitor<DerivedT> Base;
+
+public:
+ PtrUseVisitor(const DataLayout &DL) : PtrUseVisitorBase(DL) {}
+
+ /// \brief Recursively visit the uses of the given pointer.
+ /// \returns An info struct about the pointer. See \c PtrInfo for details.
+ PtrInfo visitPtr(Instruction &I) {
+ // This must be a pointer type. Get an integer type suitable to hold
+ // offsets on this pointer.
+ // FIXME: Support a vector of pointers.
+ assert(I.getType()->isPointerTy());
+ IntegerType *IntPtrTy = cast<IntegerType>(DL.getIntPtrType(I.getType()));
+ IsOffsetKnown = true;
+ Offset = APInt(IntPtrTy->getBitWidth(), 0);
+ PI.reset();
+
+ // Enqueue the uses of this pointer.
+ enqueueUsers(I);
+
+ // Visit all the uses off the worklist until it is empty.
+ while (!Worklist.empty()) {
+ UseToVisit ToVisit = Worklist.pop_back_val();
+ U = ToVisit.UseAndIsOffsetKnown.getPointer();
+ IsOffsetKnown = ToVisit.UseAndIsOffsetKnown.getInt();
+ if (IsOffsetKnown)
+ Offset = llvm_move(ToVisit.Offset);
+
+ Instruction *I = cast<Instruction>(U->getUser());
+ static_cast<DerivedT*>(this)->visit(I);
+ if (PI.isAborted())
+ break;
+ }
+ return PI;
+ }
+
+protected:
+ void visitStoreInst(StoreInst &SI) {
+ if (SI.getValueOperand() == U->get())
+ PI.setEscaped(&SI);
+ }
+
+ void visitBitCastInst(BitCastInst &BC) {
+ enqueueUsers(BC);
+ }
+
+ void visitPtrToIntInst(PtrToIntInst &I) {
+ PI.setEscaped(&I);
+ }
+
+ void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+ if (GEPI.use_empty())
+ return;
+
+ // If we can't walk the GEP, clear the offset.
+ if (!adjustOffsetForGEP(GEPI)) {
+ IsOffsetKnown = false;
+ Offset = APInt();
+ }
+
+ // Enqueue the users now that the offset has been adjusted.
+ enqueueUsers(GEPI);
+ }
+
+  // No-op intrinsics which we know don't escape the pointer to logic in
+  // some other function.
+ void visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) {}
+ void visitMemIntrinsic(MemIntrinsic &I) {}
+ void visitIntrinsicInst(IntrinsicInst &II) {
+ switch (II.getIntrinsicID()) {
+ default:
+ return Base::visitIntrinsicInst(II);
+
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ return; // No-op intrinsics.
+ }
+ }
+
+ // Generically, arguments to calls and invokes escape the pointer to some
+ // other function. Mark that.
+ void visitCallSite(CallSite CS) {
+ PI.setEscaped(CS.getInstruction());
+ Base::visitCallSite(CS);
+ }
+};
+
+}
+
+#endif
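As a complement to the trivial example above, here is a rough sketch of a subclass that consumes the per-visit state the base class maintains (U, IsOffsetKnown, Offset, and PI) in the same style as the converted SROA builders further down. OffsetRecorder, recordAccess, and Accesses are hypothetical names, not part of the patch:

    #include <utility>

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/PtrUseVisitor.h"
    #include "llvm/DataLayout.h"
    #include "llvm/Instructions.h"

    namespace {
    // Hypothetical visitor: records the constant byte offset of every load
    // and store reached from the visited pointer, aborting as soon as an
    // access with an unknown offset is seen.
    class OffsetRecorder : public llvm::PtrUseVisitor<OffsetRecorder> {
      friend class llvm::InstVisitor<OffsetRecorder>;

    public:
      llvm::SmallVector<std::pair<llvm::Instruction *, uint64_t>, 8> Accesses;

      OffsetRecorder(const llvm::DataLayout &DL)
          : llvm::PtrUseVisitor<OffsetRecorder>(DL) {}

    private:
      void recordAccess(llvm::Instruction &I) {
        if (!IsOffsetKnown)
          return PI.setAborted(&I);
        Accesses.push_back(std::make_pair(&I, Offset.getZExtValue()));
      }

      void visitLoadInst(llvm::LoadInst &LI) { recordAccess(LI); }

      void visitStoreInst(llvm::StoreInst &SI) {
        // Storing the pointer itself (rather than storing to it) escapes it,
        // mirroring the default behavior of the base class.
        if (SI.getValueOperand() == U->get())
          return PI.setEscapedAndAborted(&SI);
        recordAccess(SI);
      }
    };
    } // end anonymous namespace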
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index b3a40bee42..1b67a9cf92 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -47,6 +47,7 @@ add_llvm_library(LLVMAnalysis
ProfileVerifierPass.cpp
ProfileDataLoader.cpp
ProfileDataLoaderPass.cpp
+ PtrUseVisitor.cpp
RegionInfo.cpp
RegionPass.cpp
RegionPrinter.cpp
diff --git a/lib/Analysis/PtrUseVisitor.cpp b/lib/Analysis/PtrUseVisitor.cpp
new file mode 100644
index 0000000000..b48e86ea6e
--- /dev/null
+++ b/lib/Analysis/PtrUseVisitor.cpp
@@ -0,0 +1,58 @@
+//===- PtrUseVisitor.cpp - InstVisitors over a pointer's uses -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Implementation of the pointer use visitors.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/PtrUseVisitor.h"
+
+using namespace llvm;
+
+void detail::PtrUseVisitorBase::enqueueUsers(Instruction &I) {
+ for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
+ UI != UE; ++UI) {
+ if (VisitedUses.insert(&UI.getUse())) {
+ UseToVisit NewU = {
+ UseToVisit::UseAndIsOffsetKnownPair(&UI.getUse(), IsOffsetKnown),
+ Offset
+ };
+ Worklist.push_back(llvm_move(NewU));
+ }
+ }
+}
+
+bool detail::PtrUseVisitorBase::adjustOffsetForGEP(GetElementPtrInst &GEPI) {
+ if (!IsOffsetKnown)
+ return false;
+
+ for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
+ GTI != GTE; ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+ if (!OpC)
+ return false;
+ if (OpC->isZero())
+ continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = DL.getStructLayout(STy);
+ Offset += APInt(Offset.getBitWidth(),
+ SL->getElementOffset(ElementIdx));
+ continue;
+ }
+
+ // For array or vector indices, scale the index by the size of the type.
+ APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
+ Offset += Index * APInt(Offset.getBitWidth(),
+ DL.getTypeAllocSize(GTI.getIndexedType()));
+ }
+ return true;
+}
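To make the offset bookkeeping above concrete, consider a worked example (mine, not from the patch), assuming a 64-bit target with natural alignment: for a GEP into an alloca of type { i32, [8 x i64] } with constant indices 0, 1, 3, the leading zero index contributes nothing, the struct index 1 contributes getElementOffset(1) = 8 bytes (the i64 array starts at offset 8 after padding the i32 field), and the array index 3 contributes 3 * getTypeAllocSize(i64) = 24 bytes, so Offset accumulates to 32 and IsOffsetKnown stays true. Any non-constant index instead makes adjustOffsetForGEP return false, and visitGetElementPtrInst in the header clears the offset before enqueuing the GEP's users.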
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index de4c359ab0..1c220ca0f6 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -31,6 +31,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/DIBuilder.h"
@@ -404,106 +405,17 @@ private:
};
}
-template <typename DerivedT, typename RetT>
-class AllocaPartitioning::BuilderBase
- : public InstVisitor<DerivedT, RetT> {
-public:
- BuilderBase(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
- : TD(TD),
- AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
- P(P) {
- enqueueUsers(AI, 0);
- }
-
-protected:
- const DataLayout &TD;
- const uint64_t AllocSize;
- AllocaPartitioning &P;
-
- SmallPtrSet<Use *, 8> VisitedUses;
-
- struct OffsetUse {
- Use *U;
- int64_t Offset;
- };
- SmallVector<OffsetUse, 8> Queue;
-
- // The active offset and use while visiting.
- Use *U;
- int64_t Offset;
-
- void enqueueUsers(Instruction &I, int64_t UserOffset) {
- for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
- UI != UE; ++UI) {
- if (VisitedUses.insert(&UI.getUse())) {
- OffsetUse OU = { &UI.getUse(), UserOffset };
- Queue.push_back(OU);
- }
- }
+static Value *foldSelectInst(SelectInst &SI) {
+ // If the condition being selected on is a constant or the same value is
+ // being selected between, fold the select. Yes this does (rarely) happen
+ // early on.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
+ return SI.getOperand(1+CI->isZero());
+ if (SI.getOperand(1) == SI.getOperand(2)) {
+ return SI.getOperand(1);
}
-
- bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
- GEPOffset = Offset;
- for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
- GTI != GTE; ++GTI) {
- ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
- if (!OpC)
- return false;
- if (OpC->isZero())
- continue;
-
- // Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- unsigned ElementIdx = OpC->getZExtValue();
- const StructLayout *SL = TD.getStructLayout(STy);
- uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
- // Check that we can continue to model this GEP in a signed 64-bit offset.
- if (ElementOffset > INT64_MAX ||
- (GEPOffset >= 0 &&
- ((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) {
- DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
- << "what can be represented in an int64_t!\n"
- << " alloca: " << P.AI << "\n");
- return false;
- }
- if (GEPOffset < 0)
- GEPOffset = ElementOffset + (uint64_t)-GEPOffset;
- else
- GEPOffset += ElementOffset;
- continue;
- }
-
- APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
- Index *= APInt(Index.getBitWidth(),
- TD.getTypeAllocSize(GTI.getIndexedType()));
- Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
- /*isSigned*/true);
- // Check if the result can be stored in our int64_t offset.
- if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) {
- DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
- << "what can be represented in an int64_t!\n"
- << " alloca: " << P.AI << "\n");
- return false;
- }
-
- GEPOffset = Index.getSExtValue();
- }
- return true;
- }
-
- Value *foldSelectInst(SelectInst &SI) {
- // If the condition being selected on is a constant or the same value is
- // being selected between, fold the select. Yes this does (rarely) happen
- // early on.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
- return SI.getOperand(1+CI->isZero());
- if (SI.getOperand(1) == SI.getOperand(2)) {
- assert(*U == SI.getOperand(1));
- return SI.getOperand(1);
- }
- return 0;
- }
-};
+ return 0;
+}
/// \brief Builder for the alloca partitioning.
///
@@ -511,38 +423,28 @@ protected:
/// of an alloca and splitting the partitions for each load and store at each
/// offset.
class AllocaPartitioning::PartitionBuilder
- : public BuilderBase<PartitionBuilder, bool> {
- friend class InstVisitor<PartitionBuilder, bool>;
+ : public PtrUseVisitor<PartitionBuilder> {
+ friend class PtrUseVisitor<PartitionBuilder>;
+ friend class InstVisitor<PartitionBuilder>;
+ typedef PtrUseVisitor<PartitionBuilder> Base;
+
+ const uint64_t AllocSize;
+ AllocaPartitioning &P;
SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;
public:
- PartitionBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
- : BuilderBase<PartitionBuilder, bool>(TD, AI, P) {}
-
- /// \brief Run the builder over the allocation.
- bool operator()() {
- while (!Queue.empty()) {
- U = Queue.back().U;
- Offset = Queue.back().Offset;
- Queue.pop_back();
- if (!visit(cast<Instruction>(U->getUser())))
- return false;
- }
- return true;
- }
+ PartitionBuilder(const DataLayout &DL, AllocaInst &AI, AllocaPartitioning &P)
+ : PtrUseVisitor<PartitionBuilder>(DL),
+ AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())),
+ P(P) {}
private:
- bool markAsEscaping(Instruction &I) {
- P.PointerEscapingInstr = &I;
- return false;
- }
-
- void insertUse(Instruction &I, int64_t Offset, uint64_t Size,
+ void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
bool IsSplittable = false) {
// Completely skip uses which have a zero size or start either before or
// past the end of the allocation.
- if (Size == 0 || Offset < 0 || (uint64_t)Offset >= AllocSize) {
+ if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
<< " which has zero size or starts outside of the "
<< AllocSize << " byte alloca:\n"
@@ -551,7 +453,8 @@ private:
return;
}
- uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+ uint64_t BeginOffset = Offset.getZExtValue();
+ uint64_t EndOffset = BeginOffset + Size;
// Clamp the end offset to the end of the allocation. Note that this is
// formulated to handle even the case where "BeginOffset + Size" overflows.
@@ -572,9 +475,9 @@ private:
P.Partitions.push_back(New);
}
- bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset,
+ void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
bool IsVolatile) {
- uint64_t Size = TD.getTypeStoreSize(Ty);
+ uint64_t Size = DL.getTypeStoreSize(Ty);
// If this memory access can be shown to *statically* extend outside the
// bounds of the allocation, its behavior is undefined, so simply
@@ -583,15 +486,15 @@ private:
// risk of overflow.
// FIXME: We should instead consider the pointer to have escaped if this
// function is being instrumented for addressing bugs or race conditions.
- if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
- Size > (AllocSize - (uint64_t)Offset)) {
+ if (Offset.isNegative() || Size > AllocSize ||
+ Offset.ugt(AllocSize - Size)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte "
<< (isa<LoadInst>(I) ? "load" : "store") << " @" << Offset
<< " which extends past the end of the " << AllocSize
<< " byte alloca:\n"
<< " alloca: " << P.AI << "\n"
<< " use: " << I << "\n");
- return true;
+ return;
}
// We allow splitting of loads and stores where the type is an integer type
@@ -602,54 +505,61 @@ private:
IsSplittable = !IsVolatile && ITy->getBitWidth() == AllocSize*8;
insertUse(I, Offset, Size, IsSplittable);
- return true;
- }
-
- bool visitBitCastInst(BitCastInst &BC) {
- enqueueUsers(BC, Offset);
- return true;
}
- bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
- int64_t GEPOffset;
- if (!computeConstantGEPOffset(GEPI, GEPOffset))
- return markAsEscaping(GEPI);
-
- enqueueUsers(GEPI, GEPOffset);
- return true;
- }
-
- bool visitLoadInst(LoadInst &LI) {
+ void visitLoadInst(LoadInst &LI) {
assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
"All simple FCA loads should have been pre-split");
+
+ if (!IsOffsetKnown)
+ return PI.setAborted(&LI);
+
return handleLoadOrStore(LI.getType(), LI, Offset, LI.isVolatile());
}
- bool visitStoreInst(StoreInst &SI) {
+ void visitStoreInst(StoreInst &SI) {
Value *ValOp = SI.getValueOperand();
if (ValOp == *U)
- return markAsEscaping(SI);
+ return PI.setEscapedAndAborted(&SI);
+ if (!IsOffsetKnown)
+ return PI.setAborted(&SI);
assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
"All simple FCA stores should have been pre-split");
- return handleLoadOrStore(ValOp->getType(), SI, Offset, SI.isVolatile());
+ handleLoadOrStore(ValOp->getType(), SI, Offset, SI.isVolatile());
}
- bool visitMemSetInst(MemSetInst &II) {
+ void visitMemSetInst(MemSetInst &II) {
assert(II.getRawDest() == *U && "Pointer use is not the destination?");
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
- insertUse(II, Offset, Size, Length);
- return true;
+ if ((Length && Length->getValue() == 0) ||
+ (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
+ // Zero-length mem transfer intrinsics can be ignored entirely.
+ return;
+
+ if (!IsOffsetKnown)
+ return PI.setAborted(&II);
+
+ insertUse(II, Offset,
+ Length ? Length->getLimitedValue()
+ : AllocSize - Offset.getLimitedValue(),
+ (bool)Length);
}
- bool visitMemTransferInst(MemTransferInst &II) {
+ void visitMemTransferInst(MemTransferInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
- if (!Size)
+ if ((Length && Length->getValue() == 0) ||
+ (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
// Zero-length mem transfer intrinsics can be ignored entirely.
- return true;
+ return;
+
+ if (!IsOffsetKnown)
+ return PI.setAborted(&II);
+
+ uint64_t RawOffset = Offset.getLimitedValue();
+ uint64_t Size = Length ? Length->getLimitedValue()
+ : AllocSize - RawOffset;
MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
@@ -657,12 +567,12 @@ private:
Offsets.IsSplittable = Length;
if (*U == II.getRawDest()) {
- Offsets.DestBegin = Offset;
- Offsets.DestEnd = Offset + Size;
+ Offsets.DestBegin = RawOffset;
+ Offsets.DestEnd = RawOffset + Size;
}
if (*U == II.getRawSource()) {
- Offsets.SourceBegin = Offset;
- Offsets.SourceEnd = Offset + Size;
+ Offsets.SourceBegin = RawOffset;
+ Offsets.SourceEnd = RawOffset + Size;
}
// If we have set up end offsets for both the source and the destination,
@@ -675,7 +585,7 @@ private:
// In that case, we can completely elide the transfer.
if (!II.isVolatile() && Offsets.SourceBegin == Offsets.DestBegin) {
P.Partitions[PrevIdx].kill();
- return true;
+ return;
}
// Otherwise we have an offset transfer within the same alloca. We can't
@@ -688,7 +598,7 @@ private:
// For non-volatile transfers this is a no-op.
if (!II.isVolatile())
- return true;
+ return;
// Otherwise just suppress splitting.
Offsets.IsSplittable = false;
@@ -708,23 +618,25 @@ private:
"Already have intrinsic in map but haven't seen both ends");
(void)Inserted;
}
-
- return true;
}
// Disable SRoA for any intrinsics except for lifetime invariants.
// FIXME: What about debug intrinsics? This matches old behavior, but
// doesn't make sense.
- bool visitIntrinsicInst(IntrinsicInst &II) {
+ void visitIntrinsicInst(IntrinsicInst &II) {
+ if (!IsOffsetKnown)
+ return PI.setAborted(&II);
+
if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
II.getIntrinsicID() == Intrinsic::lifetime_end) {
ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
- uint64_t Size = std::min(AllocSize - Offset, Length->getLimitedValue());
+ uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
+ Length->getLimitedValue());
insertUse(II, Offset, Size, true);
- return true;
+ return;
}
- return markAsEscaping(II);
+ Base::visitIntrinsicInst(II);
}
Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
@@ -744,14 +656,14 @@ private:
llvm::tie(UsedI, I) = Uses.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- Size = std::max(Size, TD.getTypeStoreSize(LI->getType()));
+ Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
continue;
}
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Op = SI->getOperand(0);
if (Op == UsedI)
return SI;
- Size = std::max(Size, TD.getTypeStoreSize(Op->getType()));
+ Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
continue;
}
@@ -772,54 +684,62 @@ private:
return 0;
}
- bool visitPHINode(PHINode &PN) {
+ void visitPHINode(PHINode &PN) {
+ if (PN.use_empty())
+ return;
+ if (!IsOffsetKnown)
+ return PI.setAborted(&PN);
+
// See if we already have computed info on this node.
std::pair<uint64_t, bool> &PHIInfo = P.PHIOrSelectSizes[&PN];
if (PHIInfo.first) {
PHIInfo.second = true;
insertUse(PN, Offset, PHIInfo.first);
- return true;
+ return;
}
// Check for an unsafe use of the PHI node.
- if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first))
- return markAsEscaping(*EscapingI);
+ if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first))
+ return PI.setAborted(UnsafeI);
insertUse(PN, Offset, PHIInfo.first);
- return true;
}
- bool visitSelectInst(SelectInst &SI) {
+ void visitSelectInst(SelectInst &SI) {
+ if (SI.use_empty())
+ return;
if (Value *Result = foldSelectInst(SI)) {
if (Result == *U)
// If the result of the constant fold will be the pointer, recurse
// through the select as if we had RAUW'ed it.
- enqueueUsers(SI, Offset);
+ enqueueUsers(SI);
- return true;
+ return;
}
+ if (!IsOffsetKnown)
+ return PI.setAborted(&SI);
// See if we already have computed info on this node.
std::pair<uint64_t, bool> &SelectInfo = P.PHIOrSelectSizes[&SI];
if (SelectInfo.first) {
SelectInfo.second = true;
insertUse(SI, Offset, SelectInfo.first);
- return true;
+ return;
}
// Check for an unsafe use of the select.
- if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first))
- return markAsEscaping(*EscapingI);
+ if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first))
+ return PI.setAborted(UnsafeI);
insertUse(SI, Offset, SelectInfo.first);
- return true;
}
/// \brief Disable SROA entirely if there are unhandled users of the alloca.
- bool visitInstruction(Instruction &I) { return markAsEscaping(I); }
+ void visitInstruction(Instruction &I) {
+ PI.setAborted(&I);
+ }
};
-
/// \brief Use adder for the alloca partitioning.
///
/// This class adds the uses of an alloca to all of the partitions which they
@@ -838,25 +758,22 @@ private:
/// partition space is pre-sorted, and do a logarithmic search for the
/// partition needed, making the total visit a classical ((N + M) * log(N))
/// complexity operation.
-class AllocaPartitioning::UseBuilder : public BuilderBase<UseBuilder> {
+class AllocaPartitioning::UseBuilder : public PtrUseVisitor<UseBuilder> {
+ friend class PtrUseVisitor<UseBuilder>;
friend class InstVisitor<UseBuilder>;
+ typedef PtrUseVisitor<UseBuilder> Base;
+
+ const uint64_t AllocSize;
+ AllocaPartitioning &P;
/// \brief Set to de-duplicate dead instructions found in the use walk.
SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
public:
UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
- : BuilderBase<UseBuilder>(TD, AI, P) {}
-
- /// \brief Run the builder over the allocation.
- void operator()() {
- while (!Queue.empty()) {
- U = Queue.back().U;
- Offset = Queue.back().Offset;
- Queue.pop_back();
- this->visit(cast<Instruction>(U->getUser()));
- }
- }
+ : PtrUseVisitor<UseBuilder>(TD),
+ AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
+ P(P) {}
private:
void markAsDead(Instruction &I) {
@@ -864,13 +781,14 @@ private:
P.DeadUsers.push_back(&I);
}
- void insertUse(Instruction &User, int64_t Offset, uint64_t Size) {
+ void insertUse(Instruction &User, const APInt &Offset, uint64_t Size) {
// If the use has a zero size or extends outside of the allocation, record
// it as a dead use for elimination later.
- if (Size == 0 || Offset < 0 || (uint64_t)Offset >= AllocSize)
+ if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize))
return markAsDead(User);
- uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+ uint64_t BeginOffset = Offset.getZExtValue();
+ uint64_t EndOffset = BeginOffset + Size;
// Clamp the end offset to the end of the allocation. Note that this is
// formulated to handle even the case where "BeginOffset + Size" overflows.
@@ -893,15 +811,15 @@ private:
}
}
- void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
- uint64_t Size = TD.getTypeStoreSize(Ty);
+ void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset) {
+ uint64_t Size = DL.getTypeStoreSize(Ty);
// If this memory access can be shown to *statically* extend outside the
// bounds of the allocation, its behavior is undefined, so simply
// ignore it. Note that this is more strict than the generic clamping
// behavior of insertUse.
- if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
- Size > (AllocSize - (uint64_t)Offset))
+ if (Offset.isNegative() || Size > AllocSize ||
+ Offset.ugt(AllocSize - Size))
return markAsDead(I);
insertUse(I, Offset, Size);
@@ -911,40 +829,47 @@ private:
if (BC.use_empty())
return markAsDead(BC);
- enqueueUsers(BC, Offset);
+ return Base::visitBitCastInst(BC);
}
void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
if (GEPI.use_empty())
return markAsDead(GEPI);
- int64_t GEPOffset;
- if (!computeConstantGEPOffset(GEPI, GEPOffset))
- llvm_unreachable("Unable to compute constant offset for use");
-
- enqueueUsers(GEPI, GEPOffset);
+ return Base::visitGetElementPtrInst(GEPI);
}
void visitLoadInst(LoadInst &LI) {
+ assert(IsOffsetKnown);
handleLoadOrStore(LI.getType(), LI, Offset);
}
void visitStoreInst(StoreInst &SI) {
+ assert(IsOffsetKnown);
handleLoadOrStore(SI.getOperand(0)->getType(), SI, Offset);
}
void visitMemSetInst(MemSetInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
- insertUse(II, Offset, Size);
+ if ((Length && Length->getValue() == 0) ||
+ (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
+ return markAsDead(II);
+
+ assert(IsOffsetKnown);
+ insertUse(II, Offset, Length ? Length->getLimitedValue()
+ : AllocSize - Offset.getLimitedValue());
}
void visitMemTransferInst(MemTransferInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
- if (!Size)
+ if ((Length && Length->getValue() == 0) ||
+ (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
return markAsDead(II);
+ assert(IsOffsetKnown);
+ uint64_t Size = Length ? Length->getLimitedValue()
+ : AllocSize - Offset.getLimitedValue();
+
MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd &&
Offsets.DestBegin == Offsets.SourceBegin)
@@ -954,34 +879,39 @@ private:
}
void visitIntrinsicInst(IntrinsicInst &II) {
+ assert(IsOffsetKnown);
assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
II.getIntrinsicID() == Intrinsic::lifetime_end);
ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
- insertUse(II, Offset,
- std::min(AllocSize - Offset, Length->getLimitedValue()));
+ insertUse(II, Offset, std::min(Length->getLimitedValue(),
+ AllocSize - Offset.getLimitedValue()));
}
- void insertPHIOrSelect(Instruction &User, uint64_t Offset) {
+ void insertPHIOrSelect(Instruction &User, const APInt &Offset) {
uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first;
// For PHI and select operands outside the alloca, we can't nuke the entire
// phi or select -- the other side might still be relevant, so we special
// case them here and use a separate structure to track the operands
// themselves which should be replaced with undef.
- if (Offset >= AllocSize) {
+ if ((Offset.isNegative() && Offset.uge(Size)) ||
+ (!Offset.isNegative() && Offset.uge(AllocSize))) {
P.DeadOperands.push_back(U);
return;
}
insertUse(User, Offset, Size);
}
+
void visitPHINode(PHINode &PN) {
if (PN.use_empty())
return markAsDead(PN);
+ assert(IsOffsetKnown);
insertPHIOrSelect(PN, Offset);
}
+
void visitSelectInst(SelectInst &SI) {
if (SI.use_empty())
return markAsDead(SI);
@@ -990,7 +920,7 @@ private:
if (Result == *U)
// If the result of the constant fold will be the pointer, recurse
// through the select as if we had RAUW'ed it.
- enqueueUsers(SI, Offset);
+ enqueueUsers(SI);
else
// Otherwise the operand to the select is dead, and we can replace it
// with undef.
@@ -999,6 +929,7 @@ private:
return;
}
+ assert(IsOffsetKnown);
insertPHIOrSelect(SI, Offset);
}
@@ -1110,8 +1041,15 @@ AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
#endif
PointerEscapingInstr(0) {
PartitionBuilder PB(TD, AI, *this);
- if (!PB())
+ PartitionBuilder::PtrInfo PtrI = PB.visitPtr(AI);
+ if (PtrI.isEscaped() || PtrI.isAborted()) {
+ // FIXME: We should sink the escape vs. abort info into the caller nicely,
+ // possibly by just storing the PtrInfo in the AllocaPartitioning.
+ PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
+ : PtrI.getAbortingInst();
+ assert(PointerEscapingInstr && "Did not track a bad instruction");
return;
+ }
// Sort the uses. This arranges for the offsets to be in ascending order,
// and the sizes to be in descending order.
@@ -1145,7 +1083,9 @@ AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
// re-walking the recursive users of the alloca.
Uses.resize(Partitions.size());
UseBuilder UB(TD, AI, *this);
- UB();
+ PtrI = UB.visitPtr(AI);
+ assert(!PtrI.isEscaped() && "Previously analyzed pointer now escapes!");
+ assert(!PtrI.isAborted() && "Early aborted the visit of the pointer.");
}
Type *AllocaPartitioning::getCommonType(iterator I) const {