Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/IPO/ArgumentPromotion.cpp        |  2 +-
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp                |  2 +-
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp        |  2 +-
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp  | 10 +++++-----
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp  | 28 ++++++++++++--------------
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                  |  2 +-
-rw-r--r--  lib/Transforms/Scalar/PredicateSimplifier.cpp   |  7 ++-----
-rw-r--r--  lib/Transforms/Utils/LowerAllocations.cpp       |  2 +-
8 files changed, 26 insertions(+), 29 deletions(-)
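The common thread in this diff is the retirement of the ambiguous TargetData::getTypeSize in favour of queries that name the size they mean: getTypeStoreSize (the bytes a load or store of the type actually touches), getABITypeSize (the bytes the type occupies in memory, padding included -- the array stride), and getTypeSizeInBits. The two byte sizes differ only for types whose bit width is not a multiple of their ABI alignment, such as odd-width integers or x86 long double. Below is a minimal standalone sketch of the arithmetic; the helper names and the i36 figures are illustrative, not the TargetData implementation:

    #include <cstdint>
    #include <iostream>

    // Store size: bytes needed to hold the value itself, i.e. the bit
    // width rounded up to a whole byte.
    uint64_t storeSize(uint64_t bits) { return (bits + 7) / 8; }

    // ABI size: the store size rounded up to the ABI alignment -- the
    // distance between consecutive array elements, and what allocation
    // sizing must use.
    uint64_t abiSize(uint64_t bits, uint64_t abiAlign) {
      uint64_t bytes = storeSize(bits);
      return (bytes + abiAlign - 1) / abiAlign * abiAlign;
    }

    int main() {
      // A hypothetical i36 with 8-byte ABI alignment:
      std::cout << storeSize(36) << '\n';   // 5: bytes a store writes
      std::cout << abiSize(36, 8) << '\n';  // 8: bytes an array element spans
    }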
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index 85b29f871f..7479c8ee67 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -277,7 +277,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg) const {
 
     const PointerType *LoadTy =
       cast<PointerType>(Load->getOperand(0)->getType());
-    unsigned LoadSize = (unsigned)TD.getTypeSize(LoadTy->getElementType());
+    unsigned LoadSize = (unsigned)TD.getTypeStoreSize(LoadTy->getElementType());
 
     if (AA.canInstructionRangeModify(BB->front(), *Load, Arg, LoadSize))
       return false;  // Pointer is invalidated!
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 5d8f969f33..779b4a1871 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1227,7 +1227,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
     // (2048 bytes currently), as we don't want to introduce a 16M global or
     // something.
     if (NElements->getZExtValue()*
-        TD.getTypeSize(MI->getAllocatedType()) < 2048) {
+        TD.getABITypeSize(MI->getAllocatedType()) < 2048) {
       GVI = OptimizeGlobalAddressOfMalloc(GV, MI);
       return true;
     }
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index c2a0787236..0dad42f8e2 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -634,7 +634,7 @@ static bool FindMaximalLegalAddressingMode(Value *Addr, const Type *AccessTy,
           cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
         ConstantOffset += SL->getElementOffset(Idx);
       } else {
-        uint64_t TypeSize = TD->getTypeSize(GTI.getIndexedType());
+        uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
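In the three files above, the replacement tracks how the size is used: ArgumentPromotion asks alias analysis about the bytes a load reads, hence the store size, while GlobalOpt bounds the footprint of a heap allocation and CodeGenPrepare scales GEP indices, hence the ABI size. The GEP case is the instructive one: an index steps by whole array elements, so the scale must include padding. A hedged sketch of that offset arithmetic, reusing the illustrative i36 figures from above:

    #include <cassert>
    #include <cstdint>

    // Byte offset of element idx in an array of T: scaled by T's ABI
    // size (the array stride), never by its store size.
    uint64_t gepOffset(uint64_t idx, uint64_t eltAbiSize) {
      return idx * eltAbiSize;
    }

    int main() {
      // For the hypothetical i36 (store size 5, ABI size 8), element 3
      // starts at byte 24; scaling by the store size would give 15, a
      // byte in the middle of element 1's slot.
      assert(gepOffset(3, 8) == 24);
    }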
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 2e1d9ade0a..e5c557c349 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -137,8 +137,8 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
         dep != MemoryDependenceAnalysis::NonLocal &&
         isa<StoreInst>(dep)) {
       if (dep != last ||
-          TD.getTypeSize(last->getOperand(0)->getType()) >
-          TD.getTypeSize(BBI->getOperand(0)->getType())) {
+          TD.getTypeStoreSize(last->getOperand(0)->getType()) >
+          TD.getTypeStoreSize(BBI->getOperand(0)->getType())) {
         dep = MD.getDependency(BBI, dep);
         continue;
       }
@@ -210,7 +210,7 @@ bool DSE::handleFreeWithNonTrivialDependency(FreeInst* F, Instruction* dep,
 
   Value* depPointer = dependency->getPointerOperand();
   const Type* depType = dependency->getOperand(0)->getType();
-  unsigned depPointerSize = TD.getTypeSize(depType);
+  unsigned depPointerSize = TD.getTypeStoreSize(depType);
 
   // Check for aliasing
   AliasAnalysis::AliasResult A = AA.alias(F->getPointerOperand(), ~0UL,
@@ -329,7 +329,7 @@ bool DSE::handleEndBlock(BasicBlock& BB,
       unsigned pointerSize = ~0UL;
       if (ConstantInt* C = dyn_cast<ConstantInt>((*I)->getArraySize()))
         pointerSize = C->getZExtValue() * \
-                      TD.getTypeSize((*I)->getAllocatedType());
+                      TD.getABITypeSize((*I)->getAllocatedType());
 
       // See if the call site touches it
       AliasAnalysis::ModRefResult A = AA.getModRefInfo(CS, *I, pointerSize);
@@ -394,7 +394,7 @@ bool DSE::RemoveUndeadPointers(Value* killPointer,
       unsigned pointerSize = ~0UL;
       if (ConstantInt* C = dyn_cast<ConstantInt>((*I)->getArraySize()))
         pointerSize = C->getZExtValue() * \
-                      TD.getTypeSize((*I)->getAllocatedType());
+                      TD.getABITypeSize((*I)->getAllocatedType());
 
       // See if this pointer could alias it
       AliasAnalysis::AliasResult A = AA.alias(*I, pointerSize,
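The DeadStoreElimination hunks split along the same line: the first compares what two stores actually write, so it uses store sizes, while the alloca-region computations multiply an element count by the ABI size, since that is the stride at which the elements are laid out. The overwrite test itself is worth spelling out -- an earlier store to the same pointer is dead only if the later store covers at least as many bytes. A small sketch of that predicate (names illustrative):

    #include <cassert>
    #include <cstdint>

    // An earlier store to the same address is dead only if a later store
    // rewrites at least as many bytes, measured in store sizes.
    bool earlierStoreIsDead(uint64_t earlierBytes, uint64_t laterBytes) {
      return laterBytes >= earlierBytes;
    }

    int main() {
      // An i64 store (8 bytes) followed by an i32 store (4 bytes) to the
      // same address: the first store's high bytes survive, so keep it.
      assert(!earlierStoreIsDead(8, 4));
      // An i32 store fully overwritten by an i64 store is dead.
      assert(earlierStoreIsDead(4, 8));
    }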
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index 12523cd3fe..6ebf42a96d 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -4438,7 +4438,7 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
 
   for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
     Value *Op = GEP->getOperand(i);
-    uint64_t Size = TD.getTypeSize(GTI.getIndexedType()) & PtrSizeMask;
+    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType()) & PtrSizeMask;
     if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
       if (OpC->isZero()) continue;
 
@@ -4523,7 +4523,7 @@ Instruction *InstCombiner::FoldGEPICmp(User *GEPLHS, Value *RHS,
           return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
         if (C->isNullValue())
           EmitIt = false;
-        else if (TD->getTypeSize(GTI.getIndexedType()) == 0) {
+        else if (TD->getABITypeSize(GTI.getIndexedType()) == 0) {
           EmitIt = false;  // This is indexing into a zero sized array?
         } else if (isa<ConstantInt>(C))
           return ReplaceInstUsesWith(I, // No comparison is needed here.
@@ -6305,8 +6305,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
   // same, we open the door to infinite loops of various kinds.
   if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;
 
-  uint64_t AllocElTySize = TD->getTypeSize(AllocElTy);
-  uint64_t CastElTySize = TD->getTypeSize(CastElTy);
+  uint64_t AllocElTySize = TD->getABITypeSize(AllocElTy);
+  uint64_t CastElTySize = TD->getABITypeSize(CastElTy);
   if (CastElTySize == 0 || AllocElTySize == 0) return 0;
 
   // See if we can satisfy the modulus by pulling a scale out of the array
@@ -6573,7 +6573,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
         // is something like [0 x {int, int}]
         const Type *IntPtrTy = TD->getIntPtrType();
         int64_t FirstIdx = 0;
-        if (int64_t TySize = TD->getTypeSize(GEPIdxTy)) {
+        if (int64_t TySize = TD->getABITypeSize(GEPIdxTy)) {
           FirstIdx = Offset/TySize;
           Offset %= TySize;
 
@@ -6605,7 +6605,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
             }
           } else if (isa<ArrayType>(GEPIdxTy) || isa<VectorType>(GEPIdxTy)) {
             const SequentialType *STy = cast<SequentialType>(GEPIdxTy);
-            if (uint64_t EltSize = TD->getTypeSize(STy->getElementType())) {
+            if (uint64_t EltSize = TD->getABITypeSize(STy->getElementType())){
              NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
              Offset %= EltSize;
            } else {
@@ -8644,7 +8644,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       // insert it.  This explicit cast can make subsequent optimizations more
       // obvious.
       Value *Op = GEP.getOperand(i);
-      if (TD->getTypeSize(Op->getType()) > TD->getPointerSize())
+      if (TD->getTypeSizeInBits(Op->getType()) > TD->getPointerSizeInBits())
         if (Constant *C = dyn_cast<Constant>(Op)) {
           GEP.setOperand(i, ConstantExpr::getTrunc(C, TD->getIntPtrType()));
           MadeChange = true;
@@ -8724,12 +8724,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       } else if (Constant *GO1C = dyn_cast<Constant>(GO1)) {
         GO1 = ConstantExpr::getIntegerCast(GO1C, SO1->getType(), true);
       } else {
-        unsigned PS = TD->getPointerSize();
-        if (TD->getTypeSize(SO1->getType()) == PS) {
+        unsigned PS = TD->getPointerSizeInBits();
+        if (TD->getTypeSizeInBits(SO1->getType()) == PS) {
           // Convert GO1 to SO1's type.
           GO1 = InsertCastToIntPtrTy(GO1, SO1->getType(), &GEP, this);
 
-        } else if (TD->getTypeSize(GO1->getType()) == PS) {
+        } else if (TD->getTypeSizeInBits(GO1->getType()) == PS) {
           // Convert SO1 to GO1's type.
           SO1 = InsertCastToIntPtrTy(SO1, GO1->getType(), &GEP, this);
         } else {
@@ -8818,8 +8818,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
       const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
       if (isa<ArrayType>(SrcElTy) &&
-          TD->getTypeSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
-          TD->getTypeSize(ResElTy)) {
+          TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
+          TD->getABITypeSize(ResElTy)) {
         Value *Idx[2];
         Idx[0] = Constant::getNullValue(Type::Int32Ty);
         Idx[1] = GEP.getOperand(1);
@@ -8837,7 +8837,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       if (isa<ArrayType>(SrcElTy) &&
           (ResElTy == Type::Int8Ty || ResElTy == Type::Int8Ty)) {
         uint64_t ArrayEltSize =
-          TD->getTypeSize(cast<ArrayType>(SrcElTy)->getElementType());
+          TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType());
 
         // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
         // allow either a mul, shift, or constant here.
@@ -8938,7 +8938,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
   // Note that we only do this for alloca's, because malloc should allocate and
   // return a unique pointer, even for a zero byte allocation.
   if (isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized() &&
-      TD->getTypeSize(AI.getAllocatedType()) == 0)
+      TD->getABITypeSize(AI.getAllocatedType()) == 0)
     return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
 
   return 0;
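Most of the InstructionCombining hunks are the familiar byte-size substitution, but the two visitGetElementPtrInst hunks at 8644 and 8724 change the unit instead: getTypeSize/getPointerSize (bytes) become getTypeSizeInBits/getPointerSizeInBits. Byte-granularity comparison conflates types whose sizes merely round to the same byte count; in bits the test is exact. A sketch of the failure mode this avoids, again using an illustrative i36 on a 64-bit target:

    #include <cassert>
    #include <cstdint>

    // Old-style test: compare rounded byte sizes. An i36 occupies 8
    // bytes (its ABI size), so it looks as wide as a 64-bit pointer.
    bool sameWidthInBytes(uint64_t typeAbiBytes, uint64_t ptrBytes) {
      return typeAbiBytes == ptrBytes;
    }

    // New test: compare exact bit widths, which tells i36 and i64 apart.
    bool sameWidthInBits(uint64_t typeBits, uint64_t ptrBits) {
      return typeBits == ptrBits;
    }

    int main() {
      assert(sameWidthInBytes(8, 8));    // false positive for i36 vs i64
      assert(!sameWidthInBits(36, 64));  // exact: a cast would be lossy
    }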
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 5311a7f04f..08c0a88851 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -366,7 +366,7 @@ bool LICM::canSinkOrHoistInst(Instruction &I) {
     // Don't hoist loads which have may-aliased stores in loop.
     unsigned Size = 0;
     if (LI->getType()->isSized())
-      Size = AA->getTargetData().getTypeSize(LI->getType());
+      Size = AA->getTargetData().getTypeStoreSize(LI->getType());
     return !pointerInvalidatedByLoop(LI->getOperand(0), Size);
   } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
     // Handle obvious cases efficiently.
diff --git a/lib/Transforms/Scalar/PredicateSimplifier.cpp b/lib/Transforms/Scalar/PredicateSimplifier.cpp
index 3723bcbb0a..e84f096fbb 100644
--- a/lib/Transforms/Scalar/PredicateSimplifier.cpp
+++ b/lib/Transforms/Scalar/PredicateSimplifier.cpp
@@ -1120,11 +1120,8 @@ namespace {
     uint32_t typeToWidth(const Type *Ty) const {
       if (TD)
         return TD->getTypeSizeInBits(Ty);
-
-      if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty))
-        return ITy->getBitWidth();
-
-      return 0;
+      else
+        return Ty->getPrimitiveSizeInBits();
     }
 
     static bool isRelatedBy(const ConstantRange &CR1, const ConstantRange &CR2,
diff --git a/lib/Transforms/Utils/LowerAllocations.cpp b/lib/Transforms/Utils/LowerAllocations.cpp
index edc4c8a96f..b089cd6d8b 100644
--- a/lib/Transforms/Utils/LowerAllocations.cpp
+++ b/lib/Transforms/Utils/LowerAllocations.cpp
@@ -116,7 +116,7 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
     // malloc(type) becomes sbyte *malloc(size)
     Value *MallocArg;
     if (LowerMallocArgToInteger)
-      MallocArg = ConstantInt::get(Type::Int64Ty, TD.getTypeSize(AllocTy));
+      MallocArg = ConstantInt::get(Type::Int64Ty, TD.getABITypeSize(AllocTy));
     else
       MallocArg = ConstantExpr::getSizeOf(AllocTy);
     MallocArg = ConstantExpr::getTruncOrBitCast(cast<Constant>(MallocArg),
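Two closing observations. PredicateSimplifier's no-TargetData fallback broadens the deleted IntegerType special case into Type::getPrimitiveSizeInBits, which also reports widths for floating-point and vector types while still returning 0 for aggregates, so the old behaviour is preserved. And LowerAllocations must hand malloc the ABI size: any code that later indexes the result as an array assumes the elements sit one ABI-size stride apart. A last sketch of the sizing rule, under the same illustrative i36 figures:

    #include <cstdint>
    #include <cstdlib>

    int main() {
      // Lowering "malloc ElemTy, N": the byte count must use the ABI
      // size, because element i is addressed at i * abiSize. Sizing by
      // the store size (5 here) would under-allocate the buffer, and
      // accesses to the tail elements would run past the end.
      const uint64_t abiSize = 8;  // hypothetical i36 element
      const uint64_t n = 16;
      void *p = std::malloc(n * abiSize);
      std::free(p);
      return 0;
    }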