author    Duncan Sands <baldrick@free.fr>    2010-02-16 11:11:14 +0000
committer Duncan Sands <baldrick@free.fr>    2010-02-16 11:11:14 +0000
commit    1df9859c40492511b8aa4321eb76496005d3b75b (patch)
tree      3e65bf258ff243ac3c149c418c7f201fbc9097d6 /lib/Transforms
parent    30fb00aac02682cf1edef9f89b905621aa7a3c04 (diff)
There are two ways of checking for a given type, for example isa<PointerType>(T)
and T->isPointerTy(). Convert most instances of the first form to the second
form. Requested by Chris.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@96344 91177308-0d34-0410-b5e6-96231b3b80d8
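For illustration only (not part of the commit): a minimal C++ sketch of the two equivalent idioms the message describes. The helper name and the includes are assumptions, based on LLVM headers of this era.

#include "llvm/Type.h"             // Type::isPointerTy()
#include "llvm/DerivedTypes.h"     // PointerType
#include "llvm/Support/Casting.h"  // isa<>

using namespace llvm;

// Illustrative helper, not from the patch. Both checks give the same
// answer; the patch mechanically rewrites the first form into the second.
static bool argumentIsPointer(const Type *T) {
  // Old form, removed by this commit:
  //   return isa<PointerType>(T);
  // New form, preferred after this commit (a direct query on Type):
  return T->isPointerTy();
}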
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/IPO/ArgumentPromotion.cpp                    |   4
-rw-r--r--  lib/Transforms/IPO/DeadArgumentElimination.cpp              |   4
-rw-r--r--  lib/Transforms/IPO/FunctionAttrs.cpp                        |   8
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp                            |  10
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAndOrXor.cpp          |   4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp             |  10
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCasts.cpp             |  22
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp          |   2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp   |  28
-rw-r--r--  lib/Transforms/InstCombine/InstCombineMulDivRem.cpp         |   6
-rw-r--r--  lib/Transforms/InstCombine/InstCombinePHI.cpp               |   4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp  |   4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineVectorOps.cpp         |   4
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp         |   8
-rw-r--r--  lib/Transforms/Scalar/ABCD.cpp                              |   4
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp                    |   4
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                               |  48
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp                    |   4
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp                     |   4
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                              |   4
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp                |   2
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp                      |   2
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp                       |   2
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                              |  42
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp              |  36
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp                  | 318
-rw-r--r--  lib/Transforms/Utils/AddrModeMatcher.cpp                    |   8
-rw-r--r--  lib/Transforms/Utils/Local.cpp                              |   4
-rw-r--r--  lib/Transforms/Utils/PromoteMemoryToRegister.cpp            |  12
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp                        |  10
30 files changed, 311 insertions, 311 deletions
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index 325d353f39..e769d17cda 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -124,7 +124,7 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
unsigned ArgNo = 0;
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
I != E; ++I, ++ArgNo)
- if (isa<PointerType>(I->getType()))
+ if (I->getType()->isPointerTy())
PointerArgs.push_back(std::pair<Argument*, unsigned>(I, ArgNo));
if (PointerArgs.empty()) return 0;
@@ -673,7 +673,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
IE = SI->end(); II != IE; ++II) {
// Use i32 to index structs, and i64 for others (pointers/arrays).
// This satisfies GEP constraints.
- const Type *IdxTy = (isa<StructType>(ElTy) ?
+ const Type *IdxTy = (ElTy->isStructTy() ?
Type::getInt32Ty(F->getContext()) :
Type::getInt64Ty(F->getContext()));
Ops.push_back(ConstantInt::get(IdxTy, *II));
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 1749b1eff3..f386ed78b5 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -796,7 +796,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// Replace by null for now.
Call->replaceAllUsesWith(Constant::getNullValue(Call->getType()));
} else {
- assert(isa<StructType>(RetTy) &&
+ assert(RetTy->isStructTy() &&
"Return type changed, but not into a void. The old return type"
" must have been a struct!");
Instruction *InsertPt = Call;
@@ -870,7 +870,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
if (NFTy->getReturnType() == Type::getVoidTy(F->getContext())) {
RetVal = 0;
} else {
- assert (isa<StructType>(RetTy));
+ assert (RetTy->isStructTy());
// The original return value was a struct, insert
// extractvalue/insertvalue chains to extract only the values we need
// to return and insert them into our new result.
diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp
index 64a6d78096..298d5cf391 100644
--- a/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -175,7 +175,7 @@ bool FunctionAttrs::AddReadAttrs(const std::vector<CallGraphNode *> &SCC) {
for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
CI != CE; ++CI) {
Value *Arg = *CI;
- if (isa<PointerType>(Arg->getType()) && !PointsToLocalMemory(Arg))
+ if (Arg->getType()->isPointerTy() && !PointsToLocalMemory(Arg))
// Writes memory. Just give up.
return false;
}
@@ -257,7 +257,7 @@ bool FunctionAttrs::AddNoCaptureAttrs(const std::vector<CallGraphNode *> &SCC) {
continue;
for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A!=E; ++A)
- if (isa<PointerType>(A->getType()) && !A->hasNoCaptureAttr() &&
+ if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr() &&
!PointerMayBeCaptured(A, true, /*StoreCaptures=*/false)) {
A->addAttr(Attribute::NoCapture);
++NumNoCapture;
@@ -362,7 +362,7 @@ bool FunctionAttrs::AddNoAliasAttrs(const std::vector<CallGraphNode *> &SCC) {
// We annotate noalias return values, which are only applicable to
// pointer types.
- if (!isa<PointerType>(F->getReturnType()))
+ if (!F->getReturnType()->isPointerTy())
continue;
if (!IsFunctionMallocLike(F, SCCNodes))
@@ -372,7 +372,7 @@ bool FunctionAttrs::AddNoAliasAttrs(const std::vector<CallGraphNode *> &SCC) {
bool MadeChange = false;
for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
Function *F = SCC[i]->getFunction();
- if (F->doesNotAlias(0) || !isa<PointerType>(F->getReturnType()))
+ if (F->doesNotAlias(0) || !F->getReturnType()->isPointerTy())
continue;
F->setDoesNotAlias(0);
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index df060eb234..d4aaf9e988 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -303,7 +303,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
Changed |= CleanupConstantGlobalUsers(CE, SubInit);
} else if (CE->getOpcode() == Instruction::BitCast &&
- isa<PointerType>(CE->getType())) {
+ CE->getType()->isPointerTy()) {
// Pointer cast, delete any stores and memsets to the global.
Changed |= CleanupConstantGlobalUsers(CE, 0);
}
@@ -431,7 +431,7 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
else if (const VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
NumElements = SubVectorTy->getNumElements();
else {
- assert(isa<StructType>(*GEPI) &&
+ assert((*GEPI)->isStructTy() &&
"Indexed GEP type is not array, vector, or struct!");
continue;
}
@@ -1556,7 +1556,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
// only has one (non-null) value stored into it, then we can optimize any
// users of the loaded value (often calls and loads) that would trap if the
// value was null.
- if (isa<PointerType>(GV->getInitializer()->getType()) &&
+ if (GV->getInitializer()->getType()->isPointerTy() &&
GV->getInitializer()->isNullValue()) {
if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
if (GV->getInitializer()->getType() != SOVC->getType())
@@ -1591,7 +1591,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// where v1 and v2 both require constant pool loads, a big loss.
if (GVElType == Type::getInt1Ty(GV->getContext()) ||
GVElType->isFloatingPointTy() ||
- isa<PointerType>(GVElType) || isa<VectorType>(GVElType))
+ GVElType->isPointerTy() || GVElType->isVectorTy())
return false;
// Walk the use list of the global seeing if all the uses are load or store.
@@ -2148,7 +2148,7 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
Elts[CI->getZExtValue()] =
EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
- if (isa<ArrayType>(Init->getType()))
+ if (Init->getType()->isArrayTy())
return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
else
return ConstantVector::get(&Elts[0], Elts.size());
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 5e47953d1e..8ddaa16ee0 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1618,7 +1618,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants.
// Don't do this for vector select idioms, the code generator doesn't handle
// them well yet.
- if (!isa<VectorType>(I.getType())) {
+ if (!I.getType()->isVectorTy()) {
if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
return Match;
if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
@@ -1755,7 +1755,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(I))
return &I;
- if (isa<VectorType>(I.getType()))
+ if (I.getType()->isVectorTy())
if (isa<ConstantAggregateZero>(Op1))
return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index d7efdcfa3b..b9445040c8 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -831,7 +831,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
const Type *OldRetTy = Caller->getType();
const Type *NewRetTy = FT->getReturnType();
- if (isa<StructType>(NewRetTy))
+ if (NewRetTy->isStructTy())
return false; // TODO: Handle multiple return values.
// Check to see if we are changing the return type...
@@ -839,9 +839,9 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
if (Callee->isDeclaration() &&
// Conversion is ok if changing from one pointer type to another or from
// a pointer to an integer of the same size.
- !((isa<PointerType>(OldRetTy) || !TD ||
+ !((OldRetTy->isPointerTy() || !TD ||
OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
- (isa<PointerType>(NewRetTy) || !TD ||
+ (NewRetTy->isPointerTy() || !TD ||
NewRetTy == TD->getIntPtrType(Caller->getContext()))))
return false; // Cannot transform this return value.
@@ -888,9 +888,9 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// Converting from one pointer type to another or between a pointer and an
// integer of the same size is safe even if we do not have a body.
bool isConvertible = ActTy == ParamTy ||
- (TD && ((isa<PointerType>(ParamTy) ||
+ (TD && ((ParamTy->isPointerTy() ||
ParamTy == TD->getIntPtrType(Caller->getContext())) &&
- (isa<PointerType>(ActTy) ||
+ (ActTy->isPointerTy() ||
ActTy == TD->getIntPtrType(Caller->getContext()))));
if (Callee->isDeclaration() && !isConvertible) return false;
}
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index bb4a0e9496..a68fc6df47 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -272,7 +272,7 @@ bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
// If this is a vector sext from a compare, then we don't want to break the
// idiom where each element of the extended vector is either zero or all ones.
- if (opc == Instruction::SExt && isa<CmpInst>(V) && isa<VectorType>(Ty))
+ if (opc == Instruction::SExt && isa<CmpInst>(V) && Ty->isVectorTy())
return false;
return true;
@@ -303,8 +303,8 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
if (isa<PHINode>(Src)) {
// We don't do this if this would create a PHI node with an illegal type if
// it is currently legal.
- if (!isa<IntegerType>(Src->getType()) ||
- !isa<IntegerType>(CI.getType()) ||
+ if (!Src->getType()->isIntegerTy() ||
+ !CI.getType()->isIntegerTy() ||
ShouldChangeType(CI.getType(), Src->getType()))
if (Instruction *NV = FoldOpIntoPhi(CI))
return NV;
@@ -436,7 +436,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// type. Only do this if the dest type is a simple type, don't convert the
// expression tree to something weird like i93 unless the source is also
// strange.
- if ((isa<VectorType>(DestTy) || ShouldChangeType(SrcTy, DestTy)) &&
+ if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
CanEvaluateTruncated(Src, DestTy)) {
// If this cast is a truncate, evaluting in a different type always
@@ -728,7 +728,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
// expression tree to something weird like i93 unless the source is also
// strange.
unsigned BitsToClear;
- if ((isa<VectorType>(DestTy) || ShouldChangeType(SrcTy, DestTy)) &&
+ if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
CanEvaluateZExtd(Src, DestTy, BitsToClear)) {
assert(BitsToClear < SrcTy->getScalarSizeInBits() &&
"Unreasonable BitsToClear");
@@ -936,7 +936,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// type. Only do this if the dest type is a simple type, don't convert the
// expression tree to something weird like i93 unless the source is also
// strange.
- if ((isa<VectorType>(DestTy) || ShouldChangeType(SrcTy, DestTy)) &&
+ if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
CanEvaluateSExtd(Src, DestTy)) {
// Okay, we can transform this! Insert the new expression now.
DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
@@ -1289,7 +1289,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
Constant::getNullValue(Type::getInt32Ty(CI.getContext()));
unsigned NumZeros = 0;
while (SrcElTy != DstElTy &&
- isa<CompositeType>(SrcElTy) && !isa<PointerType>(SrcElTy) &&
+ isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
SrcElTy->getNumContainedTypes() /* not "{}" */) {
SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
++NumZeros;
@@ -1304,7 +1304,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
}
if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
- if (DestVTy->getNumElements() == 1 && !isa<VectorType>(SrcTy)) {
+ if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
@@ -1313,7 +1313,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
}
if (const VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
- if (SrcVTy->getNumElements() == 1 && !isa<VectorType>(DestTy)) {
+ if (SrcVTy->getNumElements() == 1 && !DestTy->isVectorTy()) {
Value *Elem =
Builder->CreateExtractElement(Src,
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
@@ -1324,7 +1324,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
// Okay, we have (bitcast (shuffle ..)). Check to see if this is
// a bitconvert to a vector with the same # elts.
- if (SVI->hasOneUse() && isa<VectorType>(DestTy) &&
+ if (SVI->hasOneUse() && DestTy->isVectorTy() &&
cast<VectorType>(DestTy)->getNumElements() ==
SVI->getType()->getNumElements() &&
SVI->getType()->getNumElements() ==
@@ -1346,7 +1346,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
}
}
- if (isa<PointerType>(SrcTy))
+ if (SrcTy->isPointerTy())
return commonPointerCastTransforms(CI);
return commonCastTransforms(CI);
}
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 72af80fbe1..5a1cb364ca 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1988,7 +1988,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// values. If the ptr->ptr cast can be stripped off both arguments, we do so
// now.
if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
- if (isa<PointerType>(Op0->getType()) &&
+ if (Op0->getType()->isPointerTy() &&
(isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
// We keep moving the cast from the left operand over to the right
// operand, where it can often be eliminated completely.
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index e6c59c7d38..0f2a24f59b 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -87,8 +87,8 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
const Type *SrcPTy = SrcTy->getElementType();
- if (DestPTy->isIntegerTy() || isa<PointerType>(DestPTy) ||
- isa<VectorType>(DestPTy)) {
+ if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
+ DestPTy->isVectorTy()) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
// constants.
@@ -104,11 +104,11 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
}
if (IC.getTargetData() &&
- (SrcPTy->isIntegerTy() || isa<PointerType>(SrcPTy) ||
- isa<VectorType>(SrcPTy)) &&
+ (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
+ SrcPTy->isVectorTy()) &&
// Do not allow turning this into a load of an integer, which is then
// casted to a pointer, this pessimizes pointer analysis a lot.
- (isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
+ (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
IC.getTargetData()->getTypeSizeInBits(DestPTy)) {
@@ -243,7 +243,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
const Type *SrcPTy = SrcTy->getElementType();
- if (!DestPTy->isIntegerTy() && !isa<PointerType>(DestPTy))
+ if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
return 0;
/// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
@@ -255,7 +255,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
// constants.
- if (isa<ArrayType>(SrcPTy) || isa<StructType>(SrcPTy)) {
+ if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
// Index through pointer.
Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
NewGEPIndices.push_back(Zero);
@@ -277,7 +277,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
}
- if (!SrcPTy->isIntegerTy() && !isa<PointerType>(SrcPTy))
+ if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
return 0;
// If the pointers point into different address spaces or if they point to
@@ -297,11 +297,11 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
Instruction::CastOps opcode = Instruction::BitCast;
const Type* CastSrcTy = SIOp0->getType();
const Type* CastDstTy = SrcPTy;
- if (isa<PointerType>(CastDstTy)) {
+ if (CastDstTy->isPointerTy()) {
if (CastSrcTy->isIntegerTy())
opcode = Instruction::IntToPtr;
- } else if (isa<IntegerType>(CastDstTy)) {
- if (isa<PointerType>(SIOp0->getType()))
+ } else if (CastDstTy->isIntegerTy()) {
+ if (SIOp0->getType()->isPointerTy())
opcode = Instruction::PtrToInt;
}
@@ -413,7 +413,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// Don't count debug info directives, lest they affect codegen,
// and we skip pointer-to-pointer bitcasts, which are NOPs.
if (isa<DbgInfoIntrinsic>(BBI) ||
- (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
+ (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
ScanInsts++;
continue;
}
@@ -483,7 +483,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
do {
++BBI;
} while (isa<DbgInfoIntrinsic>(BBI) ||
- (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType())));
+ (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
if (BI->isUnconditional())
if (SimplifyStoreAtEndOfBlock(SI))
@@ -544,7 +544,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
--BBI;
// Skip over debugging info.
while (isa<DbgInfoIntrinsic>(BBI) ||
- (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
+ (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
if (BBI==OtherBB->begin())
return false;
--BBI;
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 668c34fc06..380e1f87b0 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -76,7 +76,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
return BinaryOperator::CreateShl(Op0,
ConstantInt::get(Op0->getType(), Val.logBase2()));
}
- } else if (isa<VectorType>(Op1C->getType())) {
+ } else if (Op1C->getType()->isVectorTy()) {
if (Op1C->isNullValue())
return ReplaceInstUsesWith(I, Op1C);
@@ -173,7 +173,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
// If one of the operands of the multiply is a cast from a boolean value, then
// we know the bool is either zero or one, so this is a 'masking' multiply.
// X * Y (where Y is 0 or 1) -> X & (0-Y)
- if (!isa<VectorType>(I.getType())) {
+ if (!I.getType()->isVectorTy()) {
// -2 is "-1 << 1" so it is all bits set except the low one.
APInt Negative2(I.getType()->getPrimitiveSizeInBits(), (uint64_t)-2, true);
@@ -204,7 +204,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
// ANSI says we can drop signals, so we can do this anyway." (from GCC)
if (Op1F->isExactlyValue(1.0))
return ReplaceInstUsesWith(I, Op0); // Eliminate 'mul double %X, 1.0'
- } else if (isa<VectorType>(Op1C->getType())) {
+ } else if (Op1C->getType()->isVectorTy()) {
if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
// As above, vector X*splat(1.0) -> X in all defined cases.
if (Constant *Splat = Op1V->getSplatValue()) {
diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp
index bb7632fd2d..fba83542cd 100644
--- a/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -371,7 +371,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
// Be careful about transforming integer PHIs. We don't want to pessimize
// the code by turning an i32 into an i1293.
- if (isa<IntegerType>(PN.getType()) && isa<IntegerType>(CastSrcTy)) {
+ if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
if (!ShouldChangeType(PN.getType(), CastSrcTy))
return 0;
}
@@ -832,7 +832,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
// it is only used by trunc or trunc(lshr) operations. If so, we split the
// PHI into the various pieces being extracted. This sort of thing is
// introduced when SROA promotes an aggregate to a single large integer type.
- if (isa<IntegerType>(PN.getType()) && TD &&
+ if (PN.getType()->isIntegerTy() && TD &&
!TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
return Res;
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 5e9a52f77d..cd41844c33 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -104,7 +104,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
assert(Depth <= 6 && "Limit Search Depth");
uint32_t BitWidth = DemandedMask.getBitWidth();
const Type *VTy = V->getType();
- assert((TD || !isa<PointerType>(VTy)) &&
+ assert((TD || !VTy->isPointerTy()) &&
"SimplifyDemandedBits needs to know bit widths!");
assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
(!VTy->isIntOrIntVectorTy() ||
@@ -413,7 +413,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
} else
// Don't touch a scalar-to-vector bitcast.
return 0;
- } else if (isa<VectorType>(I->getOperand(0)->getType()))
+ } else if (I->getOperand(0)->getType()->isVectorTy())
// Don't touch a vector-to-scalar bitcast.
return 0;
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 20fda1a271..a58124d703 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -78,7 +78,7 @@ static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
/// value is already around as a register, for example if it were inserted then
/// extracted from the vector.
static Value *FindScalarElement(Value *V, unsigned EltNo) {
- assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
+ assert(V->getType()->isVectorTy() && "Not looking at a vector?");
const VectorType *PTy = cast<VectorType>(V->getType());
unsigned Width = PTy->getNumElements();
if (EltNo >= Width) // Out of range access.
@@ -322,7 +322,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
/// that computes V and the LHS value of the shuffle.
static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
Value *&RHS) {
- assert(isa<VectorType>(V->getType()) &&
+ assert(V->getType()->isVectorTy() &&
(RHS == 0 || V->getType() == RHS->getType()) &&
"Invalid shuffle!");
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 96c03428bd..af9ec5cacf 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -73,7 +73,7 @@ void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
/// from 'From' to 'To'. We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const {
- assert(isa<IntegerType>(From) && isa<IntegerType>(To));
+ assert(From->isIntegerTy() && To->isIntegerTy());
// If we don't have TD, we don't know if the source/dest are legal.
if (!TD) return false;
@@ -478,7 +478,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
bool EndsWithSequential = false;
for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
I != E; ++I)
- EndsWithSequential = !isa<StructType>(*I);
+ EndsWithSequential = !(*I)->isStructTy();
// Can we combine the two pointer arithmetics offsets?
if (EndsWithSequential) {
@@ -578,7 +578,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
const Type *SrcElTy = StrippedPtrTy->getElementType();
const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
- if (TD && isa<ArrayType>(SrcElTy) &&
+ if (TD && SrcElTy->isArrayTy() &&
TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
TD->getTypeAllocSize(ResElTy)) {
Value *Idx[2];
@@ -596,7 +596,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// (where tmp = 8*tmp2) into:
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
- if (TD && isa<ArrayType>(SrcElTy) && ResElTy->isIntegerTy(8)) {
+ if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
uint64_t ArrayEltSize =
TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
diff --git a/lib/Transforms/Scalar/ABCD.cpp b/lib/Transforms/Scalar/ABCD.cpp
index cf5e8c07a5..ea8e5c3e53 100644
--- a/lib/Transforms/Scalar/ABCD.cpp
+++ b/lib/Transforms/Scalar/ABCD.cpp
@@ -505,7 +505,7 @@ void