author    Nadav Rotem <nadav.rotem@intel.com>  2012-07-24 10:51:42 +0000
committer Nadav Rotem <nadav.rotem@intel.com>  2012-07-24 10:51:42 +0000
commit    a94d6e87c4c49f2e81b01d66d8bfb591277f8f96 (patch)
tree      11026d8db318f33530de45b3abbb9606d0f7ae5e /lib/Transforms/Scalar/GVN.cpp
parent    8899d5c6fb3cf118c5c73eade290b6ebb2b3b850 (diff)
Clean whitespaces.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160668 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Transforms/Scalar/GVN.cpp')
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp  266
1 file changed, 133 insertions(+), 133 deletions(-)
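
Every hunk in the patch below makes the same mechanical change: a line that carried trailing spaces is replaced by the identical line without them, so the paired -/+ lines render as visually empty. As a hedged illustration only (this is not the tool the author used for this commit), the whole cleanup could be scripted in a few lines of C++:

#include <fstream>
#include <iostream>
#include <string>

// Strip trailing spaces and tabs from each line of the file named on the
// command line; the cleaned text goes to stdout.
int main(int argc, char **argv) {
  if (argc < 2) {
    std::cerr << "usage: " << argv[0] << " <file>\n";
    return 1;
  }
  std::ifstream In(argv[1]);
  std::string Line;
  while (std::getline(In, Line)) {
    std::string::size_type End = Line.find_last_not_of(" \t");
    Line.erase(End == std::string::npos ? 0 : End + 1);
    std::cout << Line << '\n';
  }
  return 0;
}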
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 476ec383e6..140864d5f8 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -173,7 +173,7 @@ Expression ValueTable::create_expression(Instruction *I) {
if (e.varargs[0] > e.varargs[1])
std::swap(e.varargs[0], e.varargs[1]);
}
-
+
if (CmpInst *C = dyn_cast<CmpInst>(I)) {
// Sort the operand value numbers so x<y and y>x get the same value number.
CmpInst::Predicate Predicate = C->getPredicate();
@@ -187,7 +187,7 @@ Expression ValueTable::create_expression(Instruction *I) {
II != IE; ++II)
e.varargs.push_back(*II);
}
-
+
return e;
}
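
For context, this hunk sits in the code that canonicalizes commutative expressions so that x+y and y+x (and x<y versus y>x) receive the same value number. A hedged sketch of that canonicalization; the names VN0, VN1, and canonicalize are illustrative, not taken from the patch, and the header path assumes the 3.1-era tree this file comes from:

#include <algorithm>
#include <stdint.h>
#include "llvm/InstrTypes.h"  // CmpInst

using namespace llvm;

// Order the two operand value numbers; for compares, swapping the operands
// requires swapping the predicate as well so the pair stays equivalent.
static CmpInst::Predicate canonicalize(uint32_t &VN0, uint32_t &VN1,
                                       CmpInst::Predicate P) {
  if (VN0 > VN1) {
    std::swap(VN0, VN1);
    P = CmpInst::getSwappedPredicate(P);  // e.g. ICMP_SGT becomes ICMP_SLT
  }
  return P;
}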
@@ -391,7 +391,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) {
valueNumbering[V] = nextValueNumber;
return nextValueNumber++;
}
-
+
Instruction* I = cast<Instruction>(V);
Expression exp;
switch (I->getOpcode()) {
@@ -507,7 +507,7 @@ namespace {
const TargetLibraryInfo *TLI;
ValueTable VN;
-
+
/// LeaderTable - A mapping from value numbers to lists of Value*'s that
/// have that value number. Use findLeader to query it.
struct LeaderTableEntry {
@@ -517,7 +517,7 @@ namespace {
};
DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
BumpPtrAllocator TableAllocator;
-
+
SmallVector<Instruction*, 8> InstrsToErase;
public:
static char ID; // Pass identification, replacement for typeid
@@ -527,14 +527,14 @@ namespace {
}
bool runOnFunction(Function &F);
-
+
/// markInstructionForDeletion - This removes the specified instruction from
/// our various maps and marks it for deletion.
void markInstructionForDeletion(Instruction *I) {
VN.erase(I);
InstrsToErase.push_back(I);
}
-
+
const TargetData *getTargetData() const { return TD; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
@@ -549,14 +549,14 @@ namespace {
Curr.BB = BB;
return;
}
-
+
LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
Node->Val = V;
Node->BB = BB;
Node->Next = Curr.Next;
Curr.Next = Node;
}
-
+
/// removeFromLeaderTable - Scan the list of values corresponding to a given
/// value number, and remove the given instruction if encountered.
void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
@@ -567,7 +567,7 @@ namespace {
Prev = Curr;
Curr = Curr->Next;
}
-
+
if (Prev) {
Prev->Next = Curr->Next;
} else {
@@ -597,7 +597,7 @@ namespace {
AU.addPreserved<DominatorTree>();
AU.addPreserved<AliasAnalysis>();
}
-
+
// Helper functions
// FIXME: eliminate or document these better
@@ -735,15 +735,15 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
StoredVal->getType()->isStructTy() ||
StoredVal->getType()->isArrayTy())
return false;
-
+
// The store has to be at least as big as the load.
if (TD.getTypeSizeInBits(StoredVal->getType()) <
TD.getTypeSizeInBits(LoadTy))
return false;
-
+
return true;
}
-
+
/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
@@ -751,80 +751,80 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
-static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
+static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
Type *LoadedTy,
Instruction *InsertPt,
const TargetData &TD) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
return 0;
-
+
// If this is already the right type, just return it.
Type *StoredValTy = StoredVal->getType();
-
+
uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
-
+
// If the store and reload are the same size, we can always reuse it.
if (StoreSize == LoadSize) {
// Pointer to Pointer -> use bitcast.
if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy())
return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
-
+
// Convert source pointers to integers, which can be bitcast.
if (StoredValTy->isPointerTy()) {
StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
-
+
Type *TypeToCastTo = LoadedTy;
if (TypeToCastTo->isPointerTy())
TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());
-
+
if (StoredValTy != TypeToCastTo)
StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
-
+
// Cast to pointer if the load needs a pointer type.
if (LoadedTy->isPointerTy())
StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);
-
+
return StoredVal;
}
-
+
// If the loaded value is smaller than the available value, then we can
// extract out a piece from it. If the available value is too small, then we
// can't do anything.
assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");
-
+
// Convert source pointers to integers, which can be manipulated.
if (StoredValTy->isPointerTy()) {
StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
-
+
// Convert vectors and fp to integer, which can be manipulated.
if (!StoredValTy->isIntegerTy()) {
StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
}
-
+
// If this is a big-endian system, we need to shift the value down to the low
// bits so that a truncate will work.
if (TD.isBigEndian()) {
Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
}
-
+
// Truncate the integer to the right size now.
Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);
-
+
if (LoadedTy == NewIntTy)
return StoredVal;
-
+
// If the result is a pointer, inttoptr.
if (LoadedTy->isPointerTy())
return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);
-
+
// Otherwise, bitcast.
return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
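
The function above handles three shapes: equal store/load sizes (pure casts), wider stores (shift plus truncate), and everything smaller (rejected earlier by CanCoerceMustAliasedValueToLoad). Below is a hedged restatement of the equal-size path using IRBuilder instead of bare instruction constructors; coerceEqualSized is a hypothetical helper, and the header paths assume the 3.1-era layout:

#include "llvm/Support/IRBuilder.h"
#include "llvm/Target/TargetData.h"

using namespace llvm;

// Move the value between pointer and integer domains so that one bitcast
// of equal-width types is always legal.
static Value *coerceEqualSized(Value *V, Type *DstTy, IRBuilder<> &B,
                               const TargetData &TD) {
  Type *SrcTy = V->getType();
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return B.CreateBitCast(V, DstTy);                         // ptr -> ptr
  if (SrcTy->isPointerTy())                                   // ptr -> int
    V = B.CreatePtrToInt(V, TD.getIntPtrType(V->getContext()));
  if (DstTy->isPointerTy()) {                                 // int -> ptr
    V = B.CreateBitCast(V, TD.getIntPtrType(V->getContext()));
    return B.CreateIntToPtr(V, DstTy);
  }
  return B.CreateBitCast(V, DstTy);                           // int/fp/vector
}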
@@ -845,13 +845,13 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy())
return -1;
-
+
int64_t StoreOffset = 0, LoadOffset = 0;
Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD);
Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
if (StoreBase != LoadBase)
return -1;
-
+
// If the load and store are to the exact same address, they should have been
// a must alias. AA must have gotten confused.
// FIXME: Study to see if/when this happens. One case is forwarding a memset
@@ -866,18 +866,18 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
abort();
}
#endif
-
+
// If the load and store don't overlap at all, the store doesn't provide
// anything to the load. In this case, they really don't alias at all, AA
// must have gotten confused.
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);
-
+
if ((WriteSizeInBits & 7) | (LoadSize & 7))
return -1;
uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes.
LoadSize >>= 3;
-
-
+
+
bool isAAFailure = false;
if (StoreOffset < LoadOffset)
isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
@@ -895,7 +895,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
#endif
return -1;
}
-
+
// If the Load isn't completely contained within the stored bits, we don't
// have all the bits to feed it. We could do something crazy in the future
// (issue a smaller load then merge the bits in) but this seems unlikely to be
@@ -903,11 +903,11 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
if (StoreOffset > LoadOffset ||
StoreOffset+StoreSize < LoadOffset+LoadSize)
return -1;
-
+
// Okay, we can do this transformation. Return the number of bytes into the
// store that the load is.
return LoadOffset-StoreOffset;
-}
+}
/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
@@ -933,23 +933,23 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
// Cannot handle reading from store of first-class aggregate yet.
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
return -1;
-
+
Value *DepPtr = DepLI->getPointerOperand();
uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());
int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);
if (R != -1) return R;
-
+
// If we have a load/load clobber and DepLI can be widened to cover this load,
// then we should widen it!
int64_t LoadOffs = 0;
const Value *LoadBase =
GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, TD);
unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
-
+
unsigned Size = MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);
if (Size == 0) return -1;
-
+
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);
}
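
Both analyses above bottom out in AnalyzeLoadFromClobberingWrite, whose core is plain interval arithmetic on byte offsets from a shared pointer base. A standalone model of that arithmetic (illustrative names, sizes in bytes):

#include <stdint.h>

// Returns the load's byte offset into the stored bytes, or -1 when the
// store either doesn't overlap the load (an AA false positive) or doesn't
// cover it completely.
int clobberOffset(int64_t StoreOff, uint64_t StoreSize,
                  int64_t LoadOff, uint64_t LoadSize) {
  if (StoreOff + (int64_t)StoreSize <= LoadOff ||
      LoadOff + (int64_t)LoadSize <= StoreOff)
    return -1;  // no overlap at all
  if (StoreOff > LoadOff ||
      StoreOff + (int64_t)StoreSize < LoadOff + (int64_t)LoadSize)
    return -1;  // load not fully contained in the store
  return (int)(LoadOff - StoreOff);
}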
@@ -968,29 +968,29 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
if (MI->getIntrinsicID() == Intrinsic::memset)
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
MemSizeInBits, TD);
-
+
// If we have a memcpy/memmove, the only case we can handle is if this is a
// copy from constant memory. In that case, we can read directly from the
// constant memory.
MemTransferInst *MTI = cast<MemTransferInst>(MI);
-
+
Constant *Src = dyn_cast<Constant>(MTI->getSource());
if (Src == 0) return -1;
-
+
GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
if (GV == 0 || !GV->isConstant()) return -1;
-
+
// See if the access is within the bounds of the transfer.
int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
MI->getDest(), MemSizeInBits, TD);
if (Offset == -1)
return Offset;
-
+
// Otherwise, see if we can constant fold a load from the constant with the
// offset applied as appropriate.
Src = ConstantExpr::getBitCast(Src,
llvm::Type::getInt8PtrTy(Src->getContext()));
- Constant *OffsetCst =
+ Constant *OffsetCst =
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
@@ -998,7 +998,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
return Offset;
return -1;
}
-
+
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
@@ -1009,32 +1009,32 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
Type *LoadTy,
Instruction *InsertPt, const TargetData &TD){
LLVMContext &Ctx = SrcVal->getType()->getContext();
-
+
uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;
-
+
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
-
+
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
if (SrcVal->getType()->isPointerTy())
SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx));
if (!SrcVal->getType()->isIntegerTy())
SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));
-
+
// Shift the bits to the least significant depending on endianness.
unsigned ShiftAmt;
if (TD.isLittleEndian())
ShiftAmt = Offset*8;
else
ShiftAmt = (StoreSize-LoadSize-Offset)*8;
-
+
if (ShiftAmt)
SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt);
-
+
if (LoadSize != StoreSize)
SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
-
+
return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
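
The only subtle step above is the shift amount: it is measured in bits while Offset and the two sizes are bytes, and endianness decides which end of the stored value holds the bytes the load wants. A small model of that computation (shiftAmountBits is a hypothetical name):

#include <assert.h>

// Sizes and Offset in bytes, result in bits. Little-endian: the wanted
// bytes sit Offset bytes above the low end. Big-endian: they sit at the
// mirrored position from the high end.
unsigned shiftAmountBits(unsigned StoreSize, unsigned LoadSize,
                         unsigned Offset, bool LittleEndian) {
  assert(Offset + LoadSize <= StoreSize && "load must fit in the store");
  return (LittleEndian ? Offset : StoreSize - LoadSize - Offset) * 8;
}

For example, an 8-byte store feeding a 2-byte load at byte offset 2 shifts by 16 bits on little-endian and by (8-2-2)*8 = 32 bits on big-endian.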
@@ -1061,14 +1061,14 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
NewLoadSize = NextPowerOf2(NewLoadSize);
Value *PtrVal = SrcVal->getPointerOperand();
-
+
// Insert the new load after the old load. This ensures that subsequent
// memdep queries will find the new load. We can't easily remove the old
// load completely because it is already in the value numbering table.
IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
- Type *DestPTy =
+ Type *DestPTy =
IntegerType::get(LoadTy->getContext(), NewLoadSize*8);
- DestPTy = PointerType::get(DestPTy,
+ DestPTy = PointerType::get(DestPTy,
cast<PointerType>(PtrVal->getType())->getAddressSpace());
Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
@@ -1078,7 +1078,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
DEBUG(dbgs() << "TO: " << *NewLoad << "\n");
-
+
// Replace uses of the original load with the wider load. On a big endian
// system, we need to shift down to get the relevant bits.
Value *RV = NewLoad;
@@ -1087,7 +1087,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
RV = Builder.CreateTrunc(RV, SrcVal->getType());
SrcVal->replaceAllUsesWith(RV);
-
+
// We would like to use gvn.markInstructionForDeletion here, but we can't
// because the load is already memoized into the leader map table that GVN
// tracks. It is potentially possible to remove the load from the table,
@@ -1096,7 +1096,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
gvn.getMemDep().removeInstruction(SrcVal);
SrcVal = NewLoad;
}
-
+
return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);
}
@@ -1110,7 +1110,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
-
+
// We know that this method is only called when the mem transfer fully
// provides the bits for the load.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
@@ -1119,9 +1119,9 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Value *Val = MSI->getValue();
if (LoadSize != 1)
Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));
-
+
Value *OneElt = Val;
-
+
// Splat the value out to the right number of bits.
for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
// If we can double the number of bytes set, do it.
@@ -1131,16 +1131,16 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
NumBytesSet <<= 1;
continue;
}
-
+
// Otherwise insert one byte at a time.
Value *ShVal = Builder.CreateShl(Val, 1*8);
Val = Builder.CreateOr(OneElt, ShVal);
++NumBytesSet;
}
-
+
return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
}
-
+
// Otherwise, this is a memcpy/memmove from a constant global.
MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
Constant *Src = cast<Constant>(MTI->getSource());
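
The splat loop a few hunks up builds an integer in which the single memset byte is repeated LoadSize times, doubling the filled prefix whenever it still fits. A standalone model of that loop (splatByte is a hypothetical name):

#include <stdint.h>

uint64_t splatByte(uint8_t Byte, unsigned LoadSize) {
  uint64_t Val = Byte, OneElt = Byte;
  for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize;) {
    if (NumBytesSet * 2 <= LoadSize) {        // double the bytes set
      Val |= Val << (NumBytesSet * 8);
      NumBytesSet <<= 1;
      continue;
    }
    Val = OneElt | (Val << 8);                // otherwise add one byte
    ++NumBytesSet;
  }
  return Val;
}

// e.g. splatByte(0xAB, 4) == 0xABABABAB, splatByte(0xAB, 3) == 0xABABAB.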
@@ -1149,7 +1149,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
// offset applied as appropriate.
Src = ConstantExpr::getBitCast(Src,
llvm::Type::getInt8PtrTy(Src->getContext()));
- Constant *OffsetCst =
+ Constant *OffsetCst =
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
@@ -1166,13 +1166,13 @@ struct AvailableValueInBlock {
LoadVal, // A value produced by a load.
MemIntrin // A memory intrinsic which is loaded from.
};
-
+
/// V - The value that is live out of the block.
PointerIntPair<Value *, 2, ValType> Val;
-
+
/// Offset - The byte offset in Val that is interesting for the load query.
unsigned Offset;
-
+
static AvailableValueInBlock get(BasicBlock *BB, Value *V,
unsigned Offset = 0) {
AvailableValueInBlock Res;
@@ -1192,7 +1192,7 @@ struct AvailableValueInBlock {
Res.Offset = Offset;
return Res;
}
-
+
static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI,
unsigned Offset = 0) {
AvailableValueInBlock Res;
@@ -1211,17 +1211,17 @@ struct AvailableValueInBlock {
assert(isSimpleValue() && "Wrong accessor");
return Val.getPointer();
}
-
+
LoadInst *getCoercedLoadValue() const {
assert(isCoercedLoadValue() && "Wrong accessor");
return cast<LoadInst>(Val.getPointer());
}
-
+
MemIntrinsic *getMemIntrinValue() const {
assert(isMemIntrinValue() && "Wrong accessor");
return cast<MemIntrinsic>(Val.getPointer());
}
-
+
/// MaterializeAdjustedValue - Emit code into this block to adjust the value
/// defined here to the specified type. This handles various coercion cases.
Value *MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) const {
@@ -1233,7 +1233,7 @@ struct AvailableValueInBlock {
assert(TD && "Need target data to handle type mismatch case");
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
*TD);
-
+
DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
<< *getSimpleValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@@ -1245,7 +1245,7 @@ struct AvailableValueInBlock {
} else {
Res = GetLoadValueForLoad(Load, Offset, LoadTy, BB->getTerminator(),
gvn);
-
+
DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " "
<< *getCoercedLoadValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@@ -1268,12 +1268,12 @@ struct AvailableValueInBlock {
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
-static Value *ConstructSSAForLoadSet(LoadInst *LI,
+static Value *ConstructSSAForLoadSet(LoadInst *LI,
SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
GVN &gvn) {
// Check for the fully redundant, dominating load case. In this case, we can
// just use the dominating value directly.
- if (ValuesPerBlock.size() == 1 &&
+ if (ValuesPerBlock.size() == 1 &&
gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
LI->getParent()))
return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);
@@ -1282,29 +1282,29 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
SmallVector<PHINode*, 8> NewPHIs;
SSAUpdater SSAUpdate(&NewPHIs);
SSAUpdate.Initialize(LI->getType(), LI->getName());
-
+
Type *LoadTy = LI->getType();
-
+
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
const AvailableValueInBlock &AV = ValuesPerBlock[i];
BasicBlock *BB = AV.BB;
-
+
if (SSAUpdate.HasValueForBlock(BB))
continue;
SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, gvn));
}
-
+
// Perform PHI construction.
Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
-
+
// If new PHI nodes were created, notify alias analysis.
if (V->getType()->isPointerTy()) {
AliasAnalysis *AA = gvn.getAliasAnalysis();
-
+
for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
AA->copyValue(LI, NewPHIs[i]);
-
+
// Now that we've copied information to the new PHIs, scan through
// them again and inform alias analysis that we've added potentially
// escaping uses to any values that are operands to these PHIs.
@@ -1376,7 +1376,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
// the pointer operand of the load if PHI translation occurs. Make sure
// to consider the right address.
Value *Address = Deps[i].getAddress();
-
+
// If the dependence is to a store that writes to a superset of the bits
// read by the load, we can extract the bits we need for the load from the
// stored value.
@@ -1392,7 +1392,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
}
}
}
-
+
// Check to see if we have something like this:
// load i32* P
// load i8* (P+1)
@@ -1404,7 +1404,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
LI->getPointerOperand(),
DepLI, *TD);
-
+
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
Offset));
@@ -1423,10 +1423,10 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
Offset));
continue;
- }
+ }
}
}
-
+
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1443,7 +1443,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
UndefValue::get(LI->getType())));
continue;
}
-
+
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to.
@@ -1461,7 +1461,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
S->getValueOperand()));
continue;
}
-
+
if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
// If the types mismatch and we can't handle it, reject reuse of the load.
if (LD->getType() != LI->getType()) {
@@ -1470,12 +1470,12 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
UnavailableBlocks.push_back(DepBB);
continue;
- }
+ }
}
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, LD));
continue;
}
-
+
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1489,7 +1489,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
// its value. Insert PHIs and remove the fully redundant value now.
if (UnavailableBlocks.empty()) {
DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
-
+
// Perform PHI construction.
Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
LI->replaceAllUsesWith(V);
@@ -1532,10 +1532,10 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
return false;
if (Blockers.count(TmpBB))
return false;
-
+
// If any of these blocks has more than one successor (i.e. if the edge we
- // just traversed was critical), then there are other paths through this
- // block along which the load may not be anticipated. Hoisting the load
+ // just traversed was critical), then there are other paths through this
+ // block along which the load may not be anticipated. Hoisting the load
// above this block would be adding the load to execution paths along
// which it was not previously executed.
if (TmpBB->getTerminator()->getNumSuccessors() != 1)
@@ -1613,7 +1613,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
unsigned NumUnavailablePreds = PredLoads.size();
assert(NumUnavailablePreds != 0 &&
"Fully available value should be eliminated above!");
-
+
// If this load is unavailable in multiple predecessors, reject it.
// FIXME: If we could restructure the CFG, we could make a common pred with
// all the preds that don't have an available LI and insert a new load into
@@ -1690,10 +1690,10 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
DEBUG(if (!NewInsts.empty())
dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
<< *NewInsts.back() << '\n');
-
+
// Assign value numbers to the new instructions.
for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
- // FIXME: We really _ought_ to insert these value numbers into their
+ // FIXME: We really _ought_ to insert these value numbers into their
// parent's availability map. However, in doing so, we risk getting into
// ordering issues. If a block hasn't been processed yet, we would be
// marking a value as AVAIL-IN, which isn't what we intend.
@@ -1795,7 +1795,7 @@ bool GVN::processLoad(LoadInst *L) {
markInstructionForDeletion(L);
return true;
}
-
+
// ... to a pointer that has been loaded from before...
MemDepResult Dep = MD->getDependency(L);
@@ -1821,7 +1821,7 @@ bool GVN::processLoad(LoadInst *L) {
AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
L->getType(), L, *TD);
}
-
+
// Check to see if we have something like this:
// load i32* P
// load i8* (P+1)
@@ -1831,14 +1831,14 @@ bool GVN::processLoad(LoadInst *L) {
// we have the first instruction in the entry block.
if (DepLI == L)
return false;
-
+
int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
L->getPointerOperand(),
DepLI, *TD);
if (Offset != -1)
AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
}
-
+
// If the clobbering value is a memset/memcpy/memmove, see if we can forward
// a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
@@ -1848,11 +1848,11 @@ bool GVN::processLoad(LoadInst *L) {
if (Offset != -1)
AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD);
}
-
+
if (AvailVal) {
DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
<< *AvailVal << '\n' << *L << "\n\n\n");
-
+
// Replace the load!
L->replaceAllUsesWith(AvailVal);
if (AvailVal->getType()->isPointerTy())
@@ -1862,7 +1862,7 @@ bool GVN::processLoad(LoadInst *L) {
return true;
}
}
-
+
// If the value isn't available, don't do anything!
if (Dep.isClobber()) {
DEBUG(
@@ -1892,7 +1892,7 @@ bool GVN::processLoad(LoadInst *L) {
Instruction *DepInst = Dep.getInst();
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
Value *StoredVal = DepSI->getValueOperand();
-
+
// The store and load are to a must-aliased pointer, but they may not
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
@@ -1902,11 +1902,11 @@ bool GVN::processLoad(LoadInst *L) {
L, *TD);
if (StoredVal == 0)
return false;
-
+
DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
<< '\n' << *L << "\n\n\n");
}
- else
+ else
return false;
}
@@ -1921,7 +1921,7 @@ bool GVN::processLoad(LoadInst *L) {
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
Value *AvailableVal = DepLI;
-
+
// The loads are of a must-aliased pointer, but they may not actually have
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
@@ -1931,14 +1931,14 @@ bool GVN::processLoad(LoadInst *L) {
L, *TD);
if (AvailableVal == 0)
return false;
-
+
DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
<< "\n" << *L << "\n\n\n");
}
- else
+ else
return false;
}
-
+
// Remove it!
patchAndReplaceAllUsesWith(AvailableVal, L);
if (DepLI->getType()->isPointerTy())
@@ -1957,7 +1957,7 @@ bool GVN::processLoad(LoadInst *L) {
++NumGVNLoad;
return true;
}
-
+
// If this load occurs either right after a lifetime begin,
// then the loaded value is undefined.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
@@ -1972,28 +1972,28 @@ bool GVN::processLoad(LoadInst *L) {
return false;
}
-// findLeader - In order to find a leader for a given value number at a
+// findLeader - In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
-// and then scan the list to find one whose block dominates the block in
+// and then scan the list to find one whose block dominates the block in
// question. This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
Value *GVN::findLeader(BasicBlock *BB, uint32_t num) {
LeaderTableEntry Vals = LeaderTable[num];
if (!Vals.Val) return 0;
-
+
Value *Val = 0;
if (DT->dominates(Vals.BB, BB)) {
Val = Vals.Val;
if (isa<Constant>(Val)) return Val;
}
-
+
LeaderTableEntry* Next = Vals.Next;
while (Next) {
if (DT->dominates(Next->BB, BB)) {
if (isa<Constant>(Next->Val)) return Next->Val;
if (!Val) Val = Next->Val;
}
-
+
Next = Next->Next;
}
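
A hedged sketch of the list walk findLeader performs, with Entry mirroring the file's LeaderTableEntry and findLeaderSketch a hypothetical standalone twin (3.1-era headers assumed):

#include "llvm/Analysis/Dominators.h"
#include "llvm/Constants.h"

using namespace llvm;

struct Entry { Value *Val; BasicBlock *BB; Entry *Next; };

// A dominating constant is returned immediately; otherwise the first value
// whose defining block dominates BB wins. Dominator queries are cheap:
// a couple of DFS-number comparisons each.
static Value *findLeaderSketch(Entry *E, BasicBlock *BB, DominatorTree &DT) {
  Value *Found = 0;
  for (; E; E = E->Next) {
    if (!E->Val || !DT.dominates(E->BB, BB))
      continue;
    if (isa<Constant>(E->Val))
      return E->Val;
    if (!Found)
      Found = E->Val;
  }
  return Found;
}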
@@ -2243,7 +2243,7 @@ bool GVN::processInstruction(Instruction *I) {
// Instructions with void type don't return a value, so there's
// no point in trying to find redundancies in them.
if (I->getType()->isVoidTy()) return false;
-
+
uint32_t NextNum = VN.getNextUnusedValueNumber();
unsigned Num = VN.lookup_or_add(I);
@@ -2261,7 +2261,7 @@ bool GVN::processInstruction(Instruction *I) {
addToLeaderTable(Num, I, I->getParent());
return false;
}
-
+
// Perform fast-path value-number based elimination of values inherited from
// dominators.
Value *repl = findLeader(I->getParent(), Num);
@@ -2270,7 +2270,7 @@ bool GVN::processInstruction(Instruction *I) {
addToLeaderTable(Num, I, I->getParent());
return false;
}
-
+
// Remove it!
patchAndReplaceAllUsesWith(repl, I);
if (MD && repl->getType()->isPointerTy())
@@ -2297,7 +2297,7 @@ bool GVN::runOnFunction(Function& F) {
// optimization opportunities.
for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
BasicBlock *BB = FI++;
-
+
bool removedBlock = MergeBlockIntoPredecessor(BB, this);
if (removedBlock) ++NumGVNBlocks;
@@ -2454,7 +2454,7 @@ bool GVN::performPRE(Function &F) {
// we would need to insert instructions in more than one pred.
if (NumWithout != 1 || NumWith == 0)
continue;
-
+
// Don't do PRE across indirect branch.
if (isa<IndirectBrInst>(PREPred->getTerminator()))
continue;
@@ -2530,7 +2530,7 @@ bool GVN::performPRE(Function &F) {
unsigned jj = PHINode::getOperandNumForIncomingValue(ii);
VN.getAliasAnalysis()->addEscapingUse(Phi->getOperandUse(jj));
}
-
+
if (MD)
MD->invalidateCachedPointerInfo(Phi);
}
@@ -2567,7 +2567,7 @@ bool GVN::splitCriticalEdges() {
/// iterateOnFunction - Executes one iteration of GVN
bool GVN::iterateOnFunction(Function &F) {
cleanupGlobalSets();
-
+
// Top-down walk of the dominator tree
bool Changed = false;
#if 0
@@ -2602,7 +2602,7 @@ void GVN::verifyRemoved(const Instruction *Inst) const {
I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
const LeaderTableEntry *Node = &I->second;
assert(Node->Val != Inst && "Inst still in value numbering scope!");
-
+
while (Node->Next) {
Node = Node->Next;
assert(Node->Val != Inst && "Inst still in value numbering scope!");