path: root/lib/Transforms/Scalar
author    Jeff Cohen <jeffc@jolt-lang.org>    2005-07-27 06:12:32 +0000
committer Jeff Cohen <jeffc@jolt-lang.org>    2005-07-27 06:12:32 +0000
commit    00b16889ab461b7ecef1c91ade101186b7f1fce2 (patch)
tree      263acb2b05b59235d77bee1d38fa842f2044ec0e /lib/Transforms/Scalar
parent    54eed36da595f09c46a46b2b0b15757ea486b4c1 (diff)
Eliminate all remaining tabs and trailing spaces.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@22523 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Transforms/Scalar')
 lib/Transforms/Scalar/InstructionCombining.cpp     | 14
 lib/Transforms/Scalar/Reassociate.cpp              | 20
 lib/Transforms/Scalar/TailRecursionElimination.cpp |  2
 3 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index 41b8f3bc29..dcc7b25289 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -1319,7 +1319,7 @@ struct FoldSetCCLogical {
static bool MaskedValueIsZero(Value *V, ConstantIntegral *Mask) {
// Note, we cannot consider 'undef' to be "IsZero" here. The problem is that
// we cannot optimize based on the assumption that it is zero without changing
- // to to an explicit zero. If we don't change it to zero, other code could
+ // to to an explicit zero. If we don't change it to zero, other code could
// optimized based on the contradictory assumption that it is non-zero.
// Because instcombine aggressively folds operations with undef args anyway,
// this won't lose us code quality.
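The guarantee this comment describes can be modelled on ordinary integers. Below is a minimal standalone sketch of the predicate's contract, not LLVM's API; the name maskedValueIsZero and the uint64_t operands are illustrative assumptions, and only the constant case is covered:

#include <cassert>
#include <cstdint>

// A value is "masked zero" when every bit selected by Mask is known to be
// zero in it. For a compile-time constant C that is simply (C & Mask) == 0;
// the real routine also reasons about instructions, but never about undef,
// for exactly the reason the comment above gives.
static bool maskedValueIsZero(uint64_t C, uint64_t Mask) {
  return (C & Mask) == 0;
}

int main() {
  assert(maskedValueIsZero(0x4, 0x3));  // low two bits of 4 are zero
  assert(!maskedValueIsZero(0x5, 0x3)); // bit 0 of 5 is set
}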
@@ -2308,7 +2308,7 @@ Instruction *InstCombiner::FoldGEPSetCC(User *GEPLHS, Value *RHS,
// compare the base pointer.
if (PtrBase != GEPRHS->getOperand(0)) {
bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
- IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
+ IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
GEPRHS->getOperand(0)->getType();
if (IndicesTheSame)
for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
@@ -3103,7 +3103,7 @@ Instruction *InstCombiner::visitSetCondInstWithCastAndCast(SetCondInst &SCI) {
}
}
- // Finally, return the value computed.
+ // Finally, return the value computed.
if (SCI.getOpcode() == Instruction::SetLT) {
return ReplaceInstUsesWith(SCI, Result);
} else {
@@ -3167,7 +3167,7 @@ Instruction *InstCombiner::visitShiftInst(ShiftInst &I) {
return new CastInst(V, I.getType());
}
}
-
+
if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(Op1)) {
// shl uint X, 32 = 0 and shr ubyte Y, 9 = 0, ... just don't eliminate shr
// of a signed value.
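The asymmetry this comment names, that an over-wide logical shift folds to zero while an arithmetic shift right does not, can be demonstrated with a bit-at-a-time model. The sketch below is illustrative C++, not the pass's code; shifting in a loop sidesteps C++'s undefined behaviour for shift amounts at or past the bit width:

#include <cassert>
#include <cstdint>

// Arithmetic shift right: the sign bit is replicated on each step, so a
// negative value never drains to zero no matter how far it is shifted.
static int8_t arithShr(int8_t V, unsigned Amt) {
  for (unsigned i = 0; i != Amt; ++i)
    V = static_cast<int8_t>(V >> 1); // >> on a negative int is arithmetic here
  return V;
}

// Logical shift right: zero comes in from the left, so any ubyte drains to
// zero once the amount reaches 8, which is why "shr ubyte Y, 9" folds to 0.
static uint8_t logicalShr(uint8_t V, unsigned Amt) {
  for (unsigned i = 0; i != Amt; ++i)
    V = static_cast<uint8_t>(V >> 1);
  return V;
}

int main() {
  assert(logicalShr(0xFF, 9) == 0); // shr ubyte Y, 9 == 0
  assert(arithShr(-1, 9) == -1);    // shr sbyte -1, 9 == -1, never 0
}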
@@ -3623,7 +3623,7 @@ Instruction *InstCombiner::visitCastInst(CastInst &CI) {
if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (Op1C->getRawValue() == 0) {
// If the input only has the low bit set, simplify directly.
- Constant *Not1 =
+ Constant *Not1 =
ConstantExpr::getNot(ConstantInt::get(Op0->getType(), 1));
// cast (X != 0) to int --> X if X&~1 == 0
if (MaskedValueIsZero(Op0, cast<ConstantIntegral>(Not1))) {
@@ -3666,7 +3666,7 @@ Instruction *InstCombiner::visitCastInst(CastInst &CI) {
if ((Op1C->getRawValue() & Op1C->getRawValue()-1) == 0) {
// cast (X == 1) to int -> X iff X has only the low bit set.
if (Op1C->getRawValue() == 1) {
- Constant *Not1 =
+ Constant *Not1 =
ConstantExpr::getNot(ConstantInt::get(Op0->getType(), 1));
if (MaskedValueIsZero(Op0, cast<ConstantIntegral>(Not1))) {
if (CI.getType() == Op0->getType())
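Both hunks above touch the same fold: once X is known to carry nothing outside its low bit (X & ~1 == 0), the boolean produced by the compare is X itself, so the setcc and the cast can both disappear. A worked check on plain integers, illustrative rather than the pass's code:

#include <cassert>
#include <cstdint>

int main() {
  // X & ~1 == 0 restricts X to {0, 1}, exactly the range checked here.
  for (uint32_t X = 0; X <= 1; ++X) {
    assert(static_cast<uint32_t>(X != 0) == X); // cast (X != 0) to int --> X
    assert(static_cast<uint32_t>(X == 1) == X); // cast (X == 1) to int --> X
  }
}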
@@ -5247,7 +5247,7 @@ bool InstCombiner::runOnFunction(Function &F) {
E = df_ext_end(&F.front(), Visited); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
WorkList.push_back(I);
-
+
// Do a quick scan over the function. If we find any blocks that are
// unreachable, remove any instructions inside of them. This prevents
// the instcombine code from having to deal with some bad special cases.
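The quick scan the comment describes amounts to a reachability walk followed by gutting every block the walk never visits. A hedged sketch over a toy CFG, with Block, stripUnreachable, and the int instruction stand-ins all invented for illustration:

#include <set>
#include <vector>

struct Block {
  std::vector<Block *> Succs; // control-flow successors
  std::vector<int> Insts;     // stand-in for the instruction list
};

static void markReachable(Block *B, std::set<Block *> &Seen) {
  if (!Seen.insert(B).second)
    return; // already visited
  for (Block *S : B->Succs)
    markReachable(S, Seen);
}

static void stripUnreachable(std::vector<Block> &Fn) {
  std::set<Block *> Seen;
  if (!Fn.empty())
    markReachable(&Fn.front(), Seen); // the entry block is the first one
  for (Block &B : Fn)
    if (!Seen.count(&B))
      B.Insts.clear(); // drop instructions the pass would otherwise visit
}

int main() {
  std::vector<Block> Fn(2);
  Fn[1].Insts = {1, 2, 3}; // no predecessors: unreachable from the entry
  stripUnreachable(Fn);
  return Fn[1].Insts.empty() ? 0 : 1;
}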
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 0990bc5945..1bc6ebe489 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -121,7 +121,7 @@ unsigned Reassociate::getRank(Value *V) {
unsigned &CachedRank = ValueRankMap[I];
if (CachedRank) return CachedRank; // Rank already known?
-
+
// If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that
// we can reassociate expressions for code motion! Since we do not recurse
// for PHI nodes, we cannot have infinite recursion here, because there
@@ -130,7 +130,7 @@ unsigned Reassociate::getRank(Value *V) {
for (unsigned i = 0, e = I->getNumOperands();
i != e && Rank != MaxRank; ++i)
Rank = std::max(Rank, getRank(I->getOperand(i)));
-
+
// If this is a not or neg instruction, do not count it for rank. This
// assures us that X and ~X will have the same rank.
if (!I->getType()->isIntegral() ||
@@ -139,7 +139,7 @@ unsigned Reassociate::getRank(Value *V) {
//DEBUG(std::cerr << "Calculated Rank[" << V->getName() << "] = "
//<< Rank << "\n");
-
+
return CachedRank = Rank;
}
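The rank scheme these hunks' comments walk through condenses to a few lines over a toy expression node. This is a sketch of the idea, assuming C++17; Node, LeafRank, and the global RankMap are invented stand-ins for llvm::Instruction and the pass's ValueRankMap, and the not/neg exemption is left out:

#include <algorithm>
#include <map>
#include <vector>

struct Node {
  std::vector<Node *> Operands; // empty for leaves (arguments, constants)
  unsigned LeafRank = 0;        // rank assigned up front to leaves
};

static std::map<const Node *, unsigned> RankMap; // std::map: stable references

static unsigned getRank(const Node *N) {
  if (N->Operands.empty())
    return N->LeafRank;
  unsigned &Cached = RankMap[N];
  if (Cached)
    return Cached; // rank already known?
  unsigned Rank = 0;
  for (const Node *Op : N->Operands)
    Rank = std::max(Rank, getRank(Op)); // MAX over the operands
  return Cached = Rank + 1;             // 1 + MAX(rank(LHS), rank(RHS))
}

int main() {
  Node A{{}, 1}, B{{}, 2};
  Node Add{{&A, &B}, 0};
  return getRank(&Add) == 3 ? 0 : 1; // 1 + max(1, 2)
}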
@@ -176,7 +176,7 @@ static Instruction *LowerNegateToMultiply(Instruction *Neg) {
void Reassociate::LinearizeExpr(BinaryOperator *I) {
BinaryOperator *LHS = cast<BinaryOperator>(I->getOperand(0));
BinaryOperator *RHS = cast<BinaryOperator>(I->getOperand(1));
- assert(isReassociableOp(LHS, I->getOpcode()) &&
+ assert(isReassociableOp(LHS, I->getOpcode()) &&
isReassociableOp(RHS, I->getOpcode()) &&
"Not an expression that needs linearization?");
@@ -190,7 +190,7 @@ void Reassociate::LinearizeExpr(BinaryOperator *I) {
I->setOperand(1, RHS->getOperand(0));
RHS->setOperand(0, LHS);
I->setOperand(0, RHS);
-
+
++NumLinear;
MadeChange = true;
DEBUG(std::cerr << "Linearized: " << *I);
@@ -363,7 +363,7 @@ static Instruction *BreakUpSubtract(Instruction *Sub) {
// Everyone now refers to the add instruction.
Sub->replaceAllUsesWith(New);
Sub->eraseFromParent();
-
+
DEBUG(std::cerr << "Negated: " << *New);
return New;
}
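BreakUpSubtract rests on a single identity: X - Y and X + (-Y) are the same value, wraparound included, and the rewritten form is something the reassociation machinery can rotate freely. A one-line check on unsigned integers, where overflow is well defined:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 7, Y = 100;       // wraps past zero, and the identity holds
  assert(X - Y == X + (0u - Y)); // X - Y == X + (-Y)
}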
@@ -536,7 +536,7 @@ void Reassociate::OptimizeExpression(unsigned Opcode,
//case Instruction::Mul:
}
- if (IterateOptimization)
+ if (IterateOptimization)
OptimizeExpression(Opcode, Ops);
}
@@ -590,13 +590,13 @@ void Reassociate::ReassociateBB(BasicBlock *BB) {
// If this instruction is a commutative binary operator, process it.
if (!BI->isAssociative()) continue;
BinaryOperator *I = cast<BinaryOperator>(BI);
-
+
// If this is an interior node of a reassociable tree, ignore it until we
// get to the root of the tree, to avoid N^2 analysis.
if (I->hasOneUse() && isReassociableOp(I->use_back(), I->getOpcode()))
continue;
- // First, walk the expression tree, linearizing the tree, collecting
+ // First, walk the expression tree, linearizing the tree, collecting
std::vector<ValueEntry> Ops;
LinearizeExprTree(I, Ops);
@@ -619,7 +619,7 @@ void Reassociate::ReassociateBB(BasicBlock *BB) {
// this is a multiply tree used only by an add, and the immediate is a -1.
// In this case we reassociate to put the negation on the outside so that we
// can fold the negation into the add: (-X)*Y + Z -> Z-X*Y
- if (I->getOpcode() == Instruction::Mul && I->hasOneUse() &&
+ if (I->getOpcode() == Instruction::Mul && I->hasOneUse() &&
cast<Instruction>(I->use_back())->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Ops.back().Op) &&
cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) {
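The fold this condition guards, (-X)*Y + Z -> Z - X*Y, is again a plain arithmetic identity: the -1 factor in the multiply tree is pulled outward and absorbed into a subtract. A worked check, illustrative only, on unsigned integers so the wraparound is defined:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 3, Y = 5, Z = 100;
  assert((0u - X) * Y + Z == Z - X * Y); // (-X)*Y + Z --> Z - X*Y
}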
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 0710efdea3..61f00f13da 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -117,7 +117,7 @@ bool TailCallElim::runOnFunction(Function &F) {
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
if (!FunctionContainsEscapingAllocas)
FunctionContainsEscapingAllocas = CheckForEscapingAllocas(BB);
-
+
if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator()))
MadeChange |= ProcessReturningBlock(Ret, OldEntry, ArgumentPHIs);
}
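What ProcessReturningBlock achieves for a self-recursive tail call is easiest to see side by side: the call in return position becomes a branch back to the entry, with the arguments turned into loop-carried values (PHI nodes in the real pass). The pair below illustrates the transformation's effect and is not the pass itself; factTail and factLoop are invented names:

#include <cassert>

static unsigned factTail(unsigned N, unsigned Acc) {
  return N <= 1 ? Acc : factTail(N - 1, Acc * N); // tail call in return position
}

static unsigned factLoop(unsigned N, unsigned Acc) {
  for (; N > 1; --N) // what the rewrite produces: arguments become loop state
    Acc *= N;
  return Acc;
}

int main() {
  assert(factTail(6, 1) == factLoop(6, 1)); // both compute 720
}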