author     Chris Lattner <sabre@nondot.org>  2007-01-15 02:27:26 +0000
committer  Chris Lattner <sabre@nondot.org>  2007-01-15 02:27:26 +0000
commit     42a75517250017a52afb03a0ade03cbd49559fe5 (patch)
tree       ce6335dd133d9e2af752f558d4edd8f9d1fedefe /lib/Transforms
parent     b25c4ca9d8c838c2f18009221b11cd5170c47702 (diff)
rename Type::isIntegral to Type::isInteger, eliminating the old Type::isInteger.
rename Type::getIntegralTypeMask to Type::getIntegerTypeMask.

This makes naming much more consistent. For example, there are now no longer
any instances of IntegerType that are not considered isInteger! :)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@33225 91177308-0d34-0410-b5e6-96231b3b80d8
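For out-of-tree code, the change is a purely mechanical rename. A minimal
before/after sketch of a hypothetical caller (the variables Ty and Mask are
assumed for illustration, not part of the patch):

    // Before this commit:
    if (Ty->isIntegral())                 // true for any integer type
      Mask &= Ty->getIntegralTypeMask();  // all-ones mask of the type's bit width

    // After this commit: same semantics, new names.
    if (Ty->isInteger())
      Mask &= Ty->getIntegerTypeMask();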
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/ExprTypeConvert.cpp               12
-rw-r--r--  lib/Transforms/IPO/DeadTypeElimination.cpp        4
-rw-r--r--  lib/Transforms/IPO/SimplifyLibCalls.cpp          10
-rw-r--r--  lib/Transforms/Scalar/CorrelatedExprs.cpp         2
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp          6
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp  108
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp      2
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp             2
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp   14
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp              2
10 files changed, 81 insertions(+), 81 deletions(-)
diff --git a/lib/Transforms/ExprTypeConvert.cpp b/lib/Transforms/ExprTypeConvert.cpp
index 63f3209401..1ed804e23d 100644
--- a/lib/Transforms/ExprTypeConvert.cpp
+++ b/lib/Transforms/ExprTypeConvert.cpp
@@ -69,19 +69,19 @@ bool llvm::ExpressionConvertibleToType(Value *V, const Type *Ty,
case Instruction::Add:
case Instruction::Sub:
- if (!Ty->isIntegral() && !Ty->isFloatingPoint()) return false;
+ if (!Ty->isInteger() && !Ty->isFloatingPoint()) return false;
if (!ExpressionConvertibleToType(I->getOperand(0), Ty, CTMap, TD) ||
!ExpressionConvertibleToType(I->getOperand(1), Ty, CTMap, TD))
return false;
break;
case Instruction::LShr:
case Instruction::AShr:
- if (!Ty->isIntegral()) return false;
+ if (!Ty->isInteger()) return false;
if (!ExpressionConvertibleToType(I->getOperand(0), Ty, CTMap, TD))
return false;
break;
case Instruction::Shl:
- if (!Ty->isIntegral()) return false;
+ if (!Ty->isInteger()) return false;
if (!ExpressionConvertibleToType(I->getOperand(0), Ty, CTMap, TD))
return false;
break;
@@ -458,7 +458,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
case Instruction::Add:
case Instruction::Sub: {
- if (!Ty->isIntegral() && !Ty->isFloatingPoint()) return false;
+ if (!Ty->isInteger() && !Ty->isFloatingPoint()) return false;
Value *OtherOp = I->getOperand((V == I->getOperand(0)) ? 1 : 0);
return ValueConvertibleToType(I, Ty, CTMap, TD) &&
@@ -476,7 +476,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
case Instruction::AShr:
case Instruction::Shl:
if (I->getOperand(1) == V) return false; // Cannot change shift amount type
- if (!Ty->isIntegral()) return false;
+ if (!Ty->isInteger()) return false;
return ValueConvertibleToType(I, Ty, CTMap, TD);
case Instruction::Free:
@@ -576,7 +576,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
// Can convert store if the incoming value is convertible and if the
// result will preserve semantics...
const Type *Op0Ty = I->getOperand(0)->getType();
- if (!(Op0Ty->isIntegral() ^ ElTy->isIntegral()) &&
+ if (!(Op0Ty->isInteger() ^ ElTy->isInteger()) &&
!(Op0Ty->isFloatingPoint() ^ ElTy->isFloatingPoint()))
return ExpressionConvertibleToType(I->getOperand(0), ElTy, CTMap, TD);
}
diff --git a/lib/Transforms/IPO/DeadTypeElimination.cpp b/lib/Transforms/IPO/DeadTypeElimination.cpp
index b599e5a089..18724bb9db 100644
--- a/lib/Transforms/IPO/DeadTypeElimination.cpp
+++ b/lib/Transforms/IPO/DeadTypeElimination.cpp
@@ -52,13 +52,13 @@ ModulePass *llvm::createDeadTypeEliminationPass() {
//
static inline bool ShouldNukeSymtabEntry(const Type *Ty){
// Nuke all names for primitive types!
- if (Ty->isPrimitiveType() || Ty->isIntegral())
+ if (Ty->isPrimitiveType() || Ty->isInteger())
return true;
// Nuke all pointers to primitive types as well...
if (const PointerType *PT = dyn_cast<PointerType>(Ty))
if (PT->getElementType()->isPrimitiveType() ||
- PT->getElementType()->isIntegral())
+ PT->getElementType()->isInteger())
return true;
return false;
diff --git a/lib/Transforms/IPO/SimplifyLibCalls.cpp b/lib/Transforms/IPO/SimplifyLibCalls.cpp
index ea57ab0a21..db0c492f72 100644
--- a/lib/Transforms/IPO/SimplifyLibCalls.cpp
+++ b/lib/Transforms/IPO/SimplifyLibCalls.cpp
@@ -398,7 +398,7 @@ struct ExitInMainOptimization : public LibCallOptimization {
// Make sure the called function looks like exit (int argument, int return
// type, external linkage, not varargs).
virtual bool ValidateCalledFunction(const Function *F, SimplifyLibCalls &SLC){
- return F->arg_size() >= 1 && F->arg_begin()->getType()->isIntegral();
+ return F->arg_size() >= 1 && F->arg_begin()->getType()->isInteger();
}
virtual bool OptimizeCall(CallInst* ci, SimplifyLibCalls& SLC) {
@@ -960,8 +960,8 @@ struct memcmpOptimization : public LibCallOptimization {
Function::const_arg_iterator AI = F->arg_begin();
if (F->arg_size() != 3 || !isa<PointerType>(AI->getType())) return false;
if (!isa<PointerType>((++AI)->getType())) return false;
- if (!(++AI)->getType()->isIntegral()) return false;
- if (!F->getReturnType()->isIntegral()) return false;
+ if (!(++AI)->getType()->isInteger()) return false;
+ if (!F->getReturnType()->isInteger()) return false;
return true;
}
@@ -1725,8 +1725,8 @@ public:
: LibCallOptimization("isascii", "Number of 'isascii' calls simplified") {}
virtual bool ValidateCalledFunction(const Function *F, SimplifyLibCalls &SLC){
- return F->arg_size() == 1 && F->arg_begin()->getType()->isIntegral() &&
- F->getReturnType()->isIntegral();
+ return F->arg_size() == 1 && F->arg_begin()->getType()->isInteger() &&
+ F->getReturnType()->isInteger();
}
/// @brief Perform the isascii optimization.
diff --git a/lib/Transforms/Scalar/CorrelatedExprs.cpp b/lib/Transforms/Scalar/CorrelatedExprs.cpp
index 26832b22a9..e58bcf454a 100644
--- a/lib/Transforms/Scalar/CorrelatedExprs.cpp
+++ b/lib/Transforms/Scalar/CorrelatedExprs.cpp
@@ -111,7 +111,7 @@ namespace {
Value *Replacement;
public:
ValueInfo(const Type *Ty)
- : Bounds(Ty->isIntegral() ? Ty : Type::Int32Ty), Replacement(0) {}
+ : Bounds(Ty->isInteger() ? Ty : Type::Int32Ty), Replacement(0) {}
// getBounds() - Return the constant bounds of the value...
const ConstantRange &getBounds() const { return Bounds; }
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index bcd7ed808d..adbc29613d 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -325,7 +325,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L) {
if (LI->getLoopFor(L->getBlocks()[i]) == L) { // Not in a subloop...
BasicBlock *BB = L->getBlocks()[i];
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
- if (I->getType()->isIntegral()) { // Is an integer instruction
+ if (I->getType()->isInteger()) { // Is an integer instruction
SCEVHandle SH = SE->getSCEV(I);
if (SH->hasComputableLoopEvolution(L) || // Varies predictably
HasConstantItCount) {
@@ -460,7 +460,7 @@ void IndVarSimplify::runOnLoop(Loop *L) {
for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
PHINode *PN = cast<PHINode>(I);
- if (PN->getType()->isIntegral()) { // FIXME: when we have fast-math, enable!
+ if (PN->getType()->isInteger()) { // FIXME: when we have fast-math, enable!
SCEVHandle SCEV = SE->getSCEV(PN);
if (SCEV->hasComputableLoopEvolution(L))
// FIXME: It is an extremely bad idea to indvar substitute anything more
@@ -574,7 +574,7 @@ void IndVarSimplify::runOnLoop(Loop *L) {
if (LI->getLoopFor(L->getBlocks()[i]) == L) { // Not in a subloop...
BasicBlock *BB = L->getBlocks()[i];
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- if (I->getType()->isIntegral() && // Is an integer instruction
+ if (I->getType()->isInteger() && // Is an integer instruction
!I->use_empty() &&
!Rewriter.isInsertedInstruction(I)) {
SCEVHandle SH = SE->getSCEV(I);
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index 95b8330318..f961ac25e9 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -495,7 +495,7 @@ static inline Value *dyn_castNotVal(Value *V) {
// Otherwise, return null.
//
static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
- if (V->hasOneUse() && V->getType()->isIntegral())
+ if (V->hasOneUse() && V->getType()->isInteger())
if (Instruction *I = dyn_cast<Instruction>(V)) {
if (I->getOpcode() == Instruction::Mul)
if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
@@ -558,7 +558,7 @@ static void ComputeMaskedBits(Value *V, uint64_t Mask, uint64_t &KnownZero,
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return;
- Mask &= V->getType()->getIntegralTypeMask();
+ Mask &= V->getType()->getIntegerTypeMask();
switch (I->getOpcode()) {
case Instruction::And:
@@ -624,7 +624,7 @@ static void ComputeMaskedBits(Value *V, uint64_t Mask, uint64_t &KnownZero,
return;
case Instruction::BitCast: {
const Type *SrcTy = I->getOperand(0)->getType();
- if (SrcTy->isIntegral()) {
+ if (SrcTy->isInteger()) {
ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
return;
}
@@ -633,10 +633,10 @@ static void ComputeMaskedBits(Value *V, uint64_t Mask, uint64_t &KnownZero,
case Instruction::ZExt: {
// Compute the bits in the result that are not present in the input.
const Type *SrcTy = I->getOperand(0)->getType();
- uint64_t NotIn = ~SrcTy->getIntegralTypeMask();
- uint64_t NewBits = I->getType()->getIntegralTypeMask() & NotIn;
+ uint64_t NotIn = ~SrcTy->getIntegerTypeMask();
+ uint64_t NewBits = I->getType()->getIntegerTypeMask() & NotIn;
- Mask &= SrcTy->getIntegralTypeMask();
+ Mask &= SrcTy->getIntegerTypeMask();
ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
// The top bits are known to be zero.
@@ -646,10 +646,10 @@ static void ComputeMaskedBits(Value *V, uint64_t Mask, uint64_t &KnownZero,
case Instruction::SExt: {
// Compute the bits in the result that are not present in the input.
const Type *SrcTy = I->getOperand(0)->getType();
- uint64_t NotIn = ~SrcTy->getIntegralTypeMask();
- uint64_t NewBits = I->getType()->getIntegralTypeMask() & NotIn;
+ uint64_t NotIn = ~SrcTy->getIntegerTypeMask();
+ uint64_t NewBits = I->getType()->getIntegerTypeMask() & NotIn;
- Mask &= SrcTy->getIntegralTypeMask();
+ Mask &= SrcTy->getIntegerTypeMask();
ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
@@ -766,7 +766,7 @@ static void ComputeSignedMinMaxValuesFromKnownBits(const Type *Ty,
uint64_t KnownZero,
uint64_t KnownOne,
int64_t &Min, int64_t &Max) {
- uint64_t TypeBits = Ty->getIntegralTypeMask();
+ uint64_t TypeBits = Ty->getIntegerTypeMask();
uint64_t UnknownBits = ~(KnownZero|KnownOne) & TypeBits;
uint64_t SignBit = 1ULL << (Ty->getPrimitiveSizeInBits()-1);
@@ -796,7 +796,7 @@ static void ComputeUnsignedMinMaxValuesFromKnownBits(const Type *Ty,
uint64_t KnownOne,
uint64_t &Min,
uint64_t &Max) {
- uint64_t TypeBits = Ty->getIntegralTypeMask();
+ uint64_t TypeBits = Ty->getIntegerTypeMask();
uint64_t UnknownBits = ~(KnownZero|KnownOne) & TypeBits;
// The minimum value is when the unknown bits are all zeros.
@@ -831,7 +831,7 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
}
// If this is the root being simplified, allow it to have multiple uses,
// just set the DemandedMask to all bits.
- DemandedMask = V->getType()->getIntegralTypeMask();
+ DemandedMask = V->getType()->getIntegerTypeMask();
} else if (DemandedMask == 0) { // Not demanding any bits from V.
if (V != UndefValue::get(V->getType()))
return UpdateValueUsesWith(V, UndefValue::get(V->getType()));
@@ -843,7 +843,7 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false; // Only analyze instructions.
- DemandedMask &= V->getType()->getIntegralTypeMask();
+ DemandedMask &= V->getType()->getIntegerTypeMask();
uint64_t KnownZero2 = 0, KnownOne2 = 0;
switch (I->getOpcode()) {
@@ -1001,7 +1001,7 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
break;
case Instruction::BitCast:
- if (!I->getOperand(0)->getType()->isIntegral())
+ if (!I->getOperand(0)->getType()->isInteger())
return false;
if (SimplifyDemandedBits(I->getOperand(0), DemandedMask,
@@ -1012,10 +1012,10 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
case Instruction::ZExt: {
// Compute the bits in the result that are not present in the input.
const Type *SrcTy = I->getOperand(0)->getType();
- uint64_t NotIn = ~SrcTy->getIntegralTypeMask();
- uint64_t NewBits = I->getType()->getIntegralTypeMask() & NotIn;
+ uint64_t NotIn = ~SrcTy->getIntegerTypeMask();
+ uint64_t NewBits = I->getType()->getIntegerTypeMask() & NotIn;
- DemandedMask &= SrcTy->getIntegralTypeMask();
+ DemandedMask &= SrcTy->getIntegerTypeMask();
if (SimplifyDemandedBits(I->getOperand(0), DemandedMask,
KnownZero, KnownOne, Depth+1))
return true;
@@ -1027,12 +1027,12 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
case Instruction::SExt: {
// Compute the bits in the result that are not present in the input.
const Type *SrcTy = I->getOperand(0)->getType();
- uint64_t NotIn = ~SrcTy->getIntegralTypeMask();
- uint64_t NewBits = I->getType()->getIntegralTypeMask() & NotIn;
+ uint64_t NotIn = ~SrcTy->getIntegerTypeMask();
+ uint64_t NewBits = I->getType()->getIntegerTypeMask() & NotIn;
// Get the sign bit for the source type
uint64_t InSignBit = 1ULL << (SrcTy->getPrimitiveSizeInBits()-1);
- int64_t InputDemandedBits = DemandedMask & SrcTy->getIntegralTypeMask();
+ int64_t InputDemandedBits = DemandedMask & SrcTy->getIntegerTypeMask();
// If any of the sign extended bits are demanded, we know that the sign
// bit is demanded.
@@ -1174,7 +1174,7 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
// Compute the new bits that are at the top now.
uint64_t HighBits = (1ULL << ShiftAmt)-1;
HighBits <<= I->getType()->getPrimitiveSizeInBits() - ShiftAmt;
- uint64_t TypeMask = I->getType()->getIntegralTypeMask();
+ uint64_t TypeMask = I->getType()->getIntegerTypeMask();
// Unsigned shift right.
if (SimplifyDemandedBits(I->getOperand(0),
(DemandedMask << ShiftAmt) & TypeMask,
@@ -1207,7 +1207,7 @@ bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
// Compute the new bits that are at the top now.
uint64_t HighBits = (1ULL << ShiftAmt)-1;
HighBits <<= I->getType()->getPrimitiveSizeInBits() - ShiftAmt;
- uint64_t TypeMask = I->getType()->getIntegralTypeMask();
+ uint64_t TypeMask = I->getType()->getIntegerTypeMask();
// Signed shift right.
if (SimplifyDemandedBits(I->getOperand(0),
(DemandedMask << ShiftAmt) & TypeMask,
@@ -1745,7 +1745,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// (X & 254)+1 -> (X&254)|1
uint64_t KnownZero, KnownOne;
if (!isa<PackedType>(I.getType()) &&
- SimplifyDemandedBits(&I, I.getType()->getIntegralTypeMask(),
+ SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
KnownZero, KnownOne))
return &I;
}
@@ -1780,7 +1780,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// This is a sign extend if the top bits are known zero.
uint64_t Mask = ~0ULL;
Mask <<= 64-(TySizeBits-Size);
- Mask &= XorLHS->getType()->getIntegralTypeMask();
+ Mask &= XorLHS->getType()->getIntegerTypeMask();
if (!MaskedValueIsZero(XorLHS, Mask))
Size = 0; // Not a sign ext, but can't be any others either.
goto FoundSExt;
@@ -1808,7 +1808,7 @@ FoundSExt:
}
// X + X --> X << 1
- if (I.getType()->isIntegral() && I.getType() != Type::Int1Ty) {
+ if (I.getType()->isInteger() && I.getType() != Type::Int1Ty) {
if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS))) return Result;
if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
@@ -1876,7 +1876,7 @@ FoundSExt:
// Form a mask of all bits from the lowest bit added through the top.
uint64_t AddRHSHighBits = ~((AddRHSV & -AddRHSV)-1);
- AddRHSHighBits &= C2->getType()->getIntegralTypeMask();
+ AddRHSHighBits &= C2->getType()->getIntegerTypeMask();
// See if the and mask includes all of these bits.
uint64_t AddRHSHighBitsAnd = AddRHSHighBits & C2->getZExtValue();
@@ -1933,7 +1933,7 @@ static Value *RemoveNoopCast(Value *V) {
if (CastInst *CI = dyn_cast<CastInst>(V)) {
const Type *CTy = CI->getType();
const Type *OpTy = CI->getOperand(0)->getType();
- if (CTy->isIntegral() && OpTy->isIntegral()) {
+ if (CTy->isInteger() && OpTy->isInteger()) {
if (CTy->getPrimitiveSizeInBits() == OpTy->getPrimitiveSizeInBits())
return RemoveNoopCast(CI->getOperand(0));
} else if (isa<PointerType>(CTy) && isa<PointerType>(OpTy))
@@ -2412,7 +2412,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
// If the sign bits of both operands are zero (i.e. we can prove they are
// unsigned inputs), turn this into a udiv.
- if (I.getType()->isIntegral()) {
+ if (I.getType()->isInteger()) {
uint64_t Mask = 1ULL << (I.getType()->getPrimitiveSizeInBits()-1);
if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
return BinaryOperator::createUDiv(Op0, Op1, I.getName());
@@ -2641,7 +2641,7 @@ static bool isMaxValueMinusOne(const ConstantInt *C, bool isSigned) {
Val >>= 64-TypeBits; // Shift out unwanted 1 bits...
return C->getSExtValue() == Val-1;
}
- return C->getZExtValue() == C->getType()->getIntegralTypeMask()-1;
+ return C->getZExtValue() == C->getType()->getIntegerTypeMask()-1;
}
// isMinValuePlusOne - return true if this is Min+1
@@ -2858,7 +2858,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
uint64_t AndRHSV = cast<ConstantInt>(AndRHS)->getZExtValue();
// Clear bits that are not part of the constant.
- AndRHSV &= AndRHS->getType()->getIntegralTypeMask();
+ AndRHSV &= AndRHS->getType()->getIntegerTypeMask();
// If there is only one bit set...
if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
@@ -3044,7 +3044,7 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
// is all N is, ignore it.
unsigned MB, ME;
if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
- uint64_t Mask = RHS->getType()->getIntegralTypeMask();
+ uint64_t Mask = RHS->getType()->getIntegerTypeMask();
Mask >>= 64-MB+1;
if (MaskedValueIsZero(RHS, Mask))
break;
@@ -3083,13 +3083,13 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
if (!isa<PackedType>(I.getType()) &&
- SimplifyDemandedBits(&I, I.getType()->getIntegralTypeMask(),
+ SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
KnownZero, KnownOne))
return &I;
if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
uint64_t AndRHSMask = AndRHS->getZExtValue();
- uint64_t TypeMask = Op0->getType()->getIntegralTypeMask();
+ uint64_t TypeMask = Op0->getType()->getIntegerTypeMask();
uint64_t NotAndRHS = AndRHSMask^TypeMask;
// Optimize a variety of ((val OP C1) & C2) combinations...
@@ -3386,7 +3386,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegral() &&
+ if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
// Only do this if the casts both really cause code to be generated.
ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
I.getType(), TD) &&
@@ -3554,7 +3554,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
if (!isa<PackedType>(I.getType()) &&
- SimplifyDemandedBits(&I, I.getType()->getIntegralTypeMask(),
+ SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
KnownZero, KnownOne))
return &I;
@@ -3836,7 +3836,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegral() &&
+ if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
// Only do this if the casts both really cause code to be generated.
ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
I.getType(), TD) &&
@@ -3882,7 +3882,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
if (!isa<PackedType>(I.getType()) &&
- SimplifyDemandedBits(&I, I.getType()->getIntegralTypeMask(),
+ SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
KnownZero, KnownOne))
return &I;
@@ -4020,7 +4020,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegral() &&
+ if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
// Only do this if the casts both really cause code to be generated.
ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
I.getType(), TD) &&
@@ -4512,7 +4512,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// See if we can fold the comparison based on bits known to be zero or one
// in the input.
uint64_t KnownZero, KnownOne;
- if (SimplifyDemandedBits(Op0, Ty->getIntegralTypeMask(),
+ if (SimplifyDemandedBits(Op0, Ty->getIntegerTypeMask(),
KnownZero, KnownOne, 0))
return &I;
@@ -5062,7 +5062,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Value *CastOp = Cast->getOperand(0);
const Type *SrcTy = CastOp->getType();
unsigned SrcTySize = SrcTy->getPrimitiveSizeInBits();
- if (SrcTy->isIntegral() &&
+ if (SrcTy->isInteger() &&
SrcTySize == Cast->getType()->getPrimitiveSizeInBits()) {
// If this is an unsigned comparison, try to make the comparison use
// smaller constant values.
@@ -5436,7 +5436,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
- if (SimplifyDemandedBits(&I, I.getType()->getIntegralTypeMask(),
+ if (SimplifyDemandedBits(&I, I.getType()->getIntegerTypeMask(),
KnownZero, KnownOne))
return &I;
@@ -6038,7 +6038,7 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
// See if we can simplify any instructions used by the LHS whose sole
// purpose is to compute bits we don't care about.
uint64_t KnownZero = 0, KnownOne = 0;
- if (SimplifyDemandedBits(&CI, DestTy->getIntegralTypeMask(),
+ if (SimplifyDemandedBits(&CI, DestTy->getIntegerTypeMask(),
KnownZero, KnownOne))
return &CI;
@@ -6211,7 +6211,7 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
if (Op1CV == 0 || isPowerOf2_64(Op1CV)) {
// If Op1C some other power of two, convert:
uint64_t KnownZero, KnownOne;
- uint64_t TypeMask = Op1->getType()->getIntegralTypeMask();
+ uint64_t TypeMask = Op1->getType()->getIntegerTypeMask();
ComputeMaskedBits(Op0, TypeMask, KnownZero, KnownOne);
// This only works for EQ and NE
@@ -6333,7 +6333,7 @@ Instruction *InstCombiner::visitZExt(CastInst &CI) {
// If we're actually extending zero bits and the trunc is a no-op
if (MidSize < DstSize && SrcSize == DstSize) {
// Replace both of the casts with an And of the type mask.
- uint64_t AndValue = CSrc->getType()->getIntegralTypeMask();
+ uint64_t AndValue = CSrc->getType()->getIntegerTypeMask();
Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
Instruction *And =
BinaryOperator::createAnd(CSrc->getOperand(0), AndConst);
@@ -6395,7 +6395,7 @@ Instruction *InstCombiner::visitBitCast(CastInst &CI) {
const Type *SrcTy = Src->getType();
const Type *DestTy = CI.getType();
- if (SrcTy->isIntegral() && DestTy->isIntegral()) {
+ if (SrcTy->isInteger() && DestTy->isInteger()) {
if (Instruction *Result = commonIntCastTransforms(CI))
return Result;
} else {
@@ -6816,7 +6816,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
// See if we can fold the select into one of our operands.
- if (SI.getType()->isIntegral()) {
+ if (SI.getType()->isInteger()) {
// See the comment above GetSelectFoldableOperands for a description of the
// transformation we are doing here.
if (Instruction *TVI = dyn_cast<Instruction>(TrueVal))
@@ -7273,7 +7273,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
//Either we can cast directly, or we can upconvert the argument
bool isConvertible = ActTy == ParamTy ||
(isa<PointerType>(ParamTy) && isa<PointerType>(ActTy)) ||
- (ParamTy->isIntegral() && ActTy->isIntegral() &&
+ (ParamTy->isInteger() && ActTy->isInteger() &&
ParamTy->getPrimitiveSizeInBits() >= ActTy->getPrimitiveSizeInBits()) ||
(c && ParamTy->getPrimitiveSizeInBits() >= ActTy->getPrimitiveSizeInBits()
&& c->getSExtValue() > 0);
@@ -7667,7 +7667,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Value *Src = CI->getOperand(0);
const Type *SrcTy = Src->getType();
const Type *DestTy = CI->getType();
- if (Src->getType()->isIntegral()) {
+ if (Src->getType()->isInteger()) {
if (SrcTy->getPrimitiveSizeInBits() ==
DestTy->getPrimitiveSizeInBits()) {
// We can always eliminate a cast from ulong or long to the other.
@@ -7998,7 +7998,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI) {
if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
const Type *SrcPTy = SrcTy->getElementType();
- if (DestPTy->isIntegral() || isa<PointerType>(DestPTy) ||
+ if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
isa<PackedType>(DestPTy)) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
@@ -8012,7 +8012,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI) {
SrcPTy = SrcTy->getElementType();
}
- if ((SrcPTy->isIntegral() || isa<PointerType>(SrcPTy) ||
+ if ((SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
isa<PackedType>(SrcPTy)) &&
// Do not allow turning this into a load of an integer, which is then
// casted to a pointer, this pessimizes pointer analysis a lot.
@@ -8186,7 +8186,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
const Type *SrcPTy = SrcTy->getElementType();
- if (DestPTy->isIntegral() || isa<PointerType>(DestPTy)) {
+ if (DestPTy->isInteger() || isa<PointerType>(DestPTy)) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
// constants.
@@ -8199,7 +8199,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
SrcPTy = SrcTy->getElementType();
}
- if ((SrcPTy->isIntegral() || isa<PointerType>(SrcPTy)) &&
+ if ((SrcPTy->isInteger() || isa<PointerType>(SrcPTy)) &&
IC.getTargetData().getTypeSize(SrcPTy) ==
IC.getTargetData().getTypeSize(DestPTy)) {
@@ -8210,9 +8210,9 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
Instruction::CastOps opcode = Instruction::BitCast;
Value *SIOp0 = SI.getOperand(0);
if (isa<PointerType>(SrcPTy)) {
- if (SIOp0->getType()->isIntegral())
+ if (SIOp0->getType()->isInteger())
opcode = Instruction::IntToPtr;
- } else if (SrcPTy->isIntegral()) {
+ } else if (SrcPTy->isInteger()) {
if (isa<PointerType>(SIOp0->getType()))
opcode = Instruction::PtrToInt;
}
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index fcc5630ef8..798fb81190 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -398,7 +398,7 @@ static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV,
/// return true. Otherwise, return false.
bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
std::set<Instruction*> &Processed) {
- if (!I->getType()->isIntegral() && !isa<PointerType>(I->getType()))
+ if (!I->getType()->isInteger() && !isa<PointerType>(I->getType()))
return false; // Void and FP expressions cannot be reduced.
if (!Processed.insert(I).second)
return true; // Instruction already handled.
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 7550a98475..287bff2a2f 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -164,7 +164,7 @@ unsigned Reassociate::getRank(Value *V) {
// If this is a not or neg instruction, do not count it for rank. This
// assures us that X and ~X will have the same rank.
- if (!I->getType()->isIntegral() ||
+ if (!I->getType()->isInteger() ||
(!BinaryOperator::isNot(I) && !BinaryOperator::isNeg(I)))
++Rank;
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 587e0e589b..e241c01e3b 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -442,7 +442,7 @@ static bool MergeInType(const Type *In, const Type *&Accum,
Accum = In;
} else if (In == Type::VoidTy) {
// Noop.
- } else if (In->isIntegral() && Accum->isIntegral()) { // integer union.
+ } else if (In->isInteger() && Accum->isInteger()) { // integer union.
// Otherwise pick whichever type is larger.
if (cast<IntegerType>(In)->getBitWidth() >
cast<IntegerType>(Accum)->getBitWidth())
@@ -472,7 +472,7 @@ static bool MergeInType(const Type *In, const Type *&Accum,
case Type::FloatTyID: Accum = Type::Int32Ty; break;
case Type::DoubleTyID: Accum = Type::Int64Ty; break;
default:
- assert(Accum->isIntegral() && "Unknown FP type!");
+ assert(Accum->isInteger() && "Unknown FP type!");
break;
}
@@ -481,7 +481,7 @@ static bool MergeInType(const Type *In, const Type *&Accum,
case Type::FloatTyID: In = Type::Int32Ty; break;
case Type::DoubleTyID: In = Type::Int64Ty; break;
default:
- assert(In->isIntegral() && "Unknown FP type!");
+ assert(In->isInteger() && "Unknown FP type!");
break;
}
return MergeInType(In, Accum, TD);
@@ -541,7 +541,7 @@ const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
IsNotTrivial = true;
const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
if (SubElt == 0) return 0;
- if (SubElt != Type::VoidTy && SubElt->isIntegral()) {
+ if (SubElt != Type::VoidTy && SubElt->isInteger()) {
const Type *NewTy =
getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset);
if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
@@ -653,7 +653,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
// an integer.
NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
} else {
- assert(NV->getType()->isIntegral() && "Unknown promotion!");
+ assert(NV->getType()->isInteger() && "Unknown promotion!");
if (Offset && Offset < TD.getTypeSize(NV->getType())*8) {
NV = new ShiftInst(Instruction::LShr, NV,
ConstantInt::get(Type::Int8Ty, Offset),
@@ -661,7 +661,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
}
// If the result is an integer, this is a trunc or bitcast.
- if (LI->getType()->isIntegral()) {
+ if (LI->getType()->isInteger()) {
NV = CastInst::createTruncOrBitCast(NV, LI->getType(),
LI->getName(), LI);
} else if (LI->getType()->isFloatingPoint()) {
@@ -748,7 +748,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
if (TotalBits != SrcSize) {
assert(TotalBits > SrcSize);
uint64_t Mask = ~(((1ULL << SrcSize)-1) << Offset);