author    | Duncan Sands <baldrick@free.fr> | 2009-01-12 20:38:59 +0000
committer | Duncan Sands <baldrick@free.fr> | 2009-01-12 20:38:59 +0000
commit    | ceb4d1aecb9deffe59b3dcdc9a783ffde8477be9 (patch)
tree      | b81070777ea57a00082bbc345c47a9499d77d24d /lib/Transforms
parent    | ccca7fe6a30ec536de3823c0867806c1f86b2212 (diff)
Rename getABITypeSize to getTypePaddedSize, as
suggested by Chris.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@62099 91177308-0d34-0410-b5e6-96231b3b80d8
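
For context, the renamed query returns a type's store size rounded up to its ABI alignment -- that is, the stride in bytes between consecutive elements of an array of that type. The sketch below is illustrative only and is not part of this patch; it assumes the 2009-era TargetData interface, and arrayStrideInBytes is a hypothetical helper name, not an LLVM API.

  // Minimal sketch (not from this patch) of what getTypePaddedSize computes.
  #include "llvm/Target/TargetData.h"
  #include "llvm/DerivedTypes.h"
  using namespace llvm;

  uint64_t arrayStrideInBytes(const TargetData &TD, const ArrayType *ATy) {
    // Before this commit the call was spelled TD.getABITypeSize(EltTy).
    // The padded size is the store size rounded up to the ABI alignment,
    // so multiplying by the element count gives the array's byte size.
    return TD.getTypePaddedSize(ATy->getElementType()) *
           ATy->getNumElements();
  }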
Diffstat (limited to 'lib/Transforms')
-rw-r--r-- | lib/Transforms/IPO/GlobalOpt.cpp               |  4
-rw-r--r-- | lib/Transforms/Scalar/CodeGenPrepare.cpp       |  2
-rw-r--r-- | lib/Transforms/Scalar/DeadStoreElimination.cpp |  8
-rw-r--r-- | lib/Transforms/Scalar/InstructionCombining.cpp | 30
-rw-r--r-- | lib/Transforms/Scalar/LoopStrengthReduce.cpp   |  2
-rw-r--r-- | lib/Transforms/Scalar/MemCpyOptimizer.cpp      |  8
-rw-r--r-- | lib/Transforms/Scalar/ScalarReplAggregates.cpp | 45
-rw-r--r-- | lib/Transforms/Utils/LowerAllocations.cpp      |  3
8 files changed, 52 insertions, 50 deletions
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 0fb3359acc..1da33e6c7f 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -511,7 +511,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
       return 0; // It's not worth it.
     NewGlobals.reserve(NumElements);
 
-    uint64_t EltSize = TD.getABITypeSize(STy->getElementType());
+    uint64_t EltSize = TD.getTypePaddedSize(STy->getElementType());
     unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
     for (unsigned i = 0, e = NumElements; i != e; ++i) {
       Constant *In = getAggregateConstantElement(Init,
@@ -1445,7 +1445,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
     // (2048 bytes currently), as we don't want to introduce a 16M global or
     // something.
     if (NElements->getZExtValue()*
-        TD.getABITypeSize(MI->getAllocatedType()) < 2048) {
+        TD.getTypePaddedSize(MI->getAllocatedType()) < 2048) {
       GVI = OptimizeGlobalAddressOfMalloc(GV, MI);
       return true;
     }
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 125b076095..be7ed9c508 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -817,7 +817,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
         cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
       ConstantOffset += SL->getElementOffset(Idx);
     } else {
-      uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
+      uint64_t TypeSize = TD->getTypePaddedSize(GTI.getIndexedType());
       if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
         ConstantOffset += CI->getSExtValue()*TypeSize;
       } else if (TypeSize) {  // Scales of zero don't do anything.
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 8b40da656e..2d38e76dbe 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -305,11 +305,11 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
       if (AllocaInst* A = dyn_cast<AllocaInst>(*I)) {
         if (ConstantInt* C = dyn_cast<ConstantInt>(A->getArraySize()))
           pointerSize = C->getZExtValue() *
-                        TD.getABITypeSize(A->getAllocatedType());
+                        TD.getTypePaddedSize(A->getAllocatedType());
       } else {
         const PointerType* PT = cast<PointerType>(
                                                 cast<Argument>(*I)->getType());
-        pointerSize = TD.getABITypeSize(PT->getElementType());
+        pointerSize = TD.getTypePaddedSize(PT->getElementType());
       }
 
       // See if the call site touches it
@@ -382,10 +382,10 @@ bool DSE::RemoveUndeadPointers(Value* killPointer, uint64_t killPointerSize,
       if (AllocaInst* A = dyn_cast<AllocaInst>(*I)) {
         if (ConstantInt* C = dyn_cast<ConstantInt>(A->getArraySize()))
           pointerSize = C->getZExtValue() *
-                        TD.getABITypeSize(A->getAllocatedType());
+                        TD.getTypePaddedSize(A->getAllocatedType());
       } else {
         const PointerType* PT = cast<PointerType>(cast<Argument>(*I)->getType());
-        pointerSize = TD.getABITypeSize(PT->getElementType());
+        pointerSize = TD.getTypePaddedSize(PT->getElementType());
       }
 
       // See if this pointer could alias it
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index 49215317e2..ee3596bde4 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -5142,7 +5142,7 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
   for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
        ++i, ++GTI) {
     Value *Op = *i;
-    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType()) & PtrSizeMask;
+    uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType()) & PtrSizeMask;
     if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
       if (OpC->isZero()) continue;
 
@@ -5233,7 +5233,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
       if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
         Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
       } else {
-        uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
+        uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType());
         Offset += Size*CI->getSExtValue();
       }
     } else {
@@ -5249,7 +5249,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
   Value *VariableIdx = GEP->getOperand(i);
   // Determine the scale factor of the variable element.  For example, this is
   // 4 if the variable index is into an array of i32.
-  uint64_t VariableScale = TD.getABITypeSize(GTI.getIndexedType());
+  uint64_t VariableScale = TD.getTypePaddedSize(GTI.getIndexedType());
 
   // Verify that there are no other variable indices.  If so, emit the hard way.
   for (++i, ++GTI; i != e; ++i, ++GTI) {
@@ -5263,7 +5263,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
     if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
       Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
     } else {
-      uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
+      uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType());
       Offset += Size*CI->getSExtValue();
     }
   }
@@ -7419,8 +7419,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
   // same, we open the door to infinite loops of various kinds.
   if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;
 
-  uint64_t AllocElTySize = TD->getABITypeSize(AllocElTy);
-  uint64_t CastElTySize = TD->getABITypeSize(CastElTy);
+  uint64_t AllocElTySize = TD->getTypePaddedSize(AllocElTy);
+  uint64_t CastElTySize = TD->getTypePaddedSize(CastElTy);
   if (CastElTySize == 0 || AllocElTySize == 0) return 0;
 
   // See if we can satisfy the modulus by pulling a scale out of the array
@@ -7708,7 +7708,7 @@ static bool FindElementAtOffset(const Type *Ty, int64_t Offset,
   // is something like [0 x {int, int}]
   const Type *IntPtrTy = TD->getIntPtrType();
   int64_t FirstIdx = 0;
-  if (int64_t TySize = TD->getABITypeSize(Ty)) {
+  if (int64_t TySize = TD->getTypePaddedSize(Ty)) {
     FirstIdx = Offset/TySize;
     Offset -= FirstIdx*TySize;
 
@@ -7740,7 +7740,7 @@ static bool FindElementAtOffset(const Type *Ty, int64_t Offset,
       Offset -= SL->getElementOffset(Elt);
       Ty = STy->getElementType(Elt);
     } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
-      uint64_t EltSize = TD->getABITypeSize(AT->getElementType());
+      uint64_t EltSize = TD->getTypePaddedSize(AT->getElementType());
       assert(EltSize && "Cannot index into a zero-sized array");
       NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
       Offset %= EltSize;
@@ -8407,7 +8407,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
       // is a single-index GEP.
       if (X->getType() == CI.getType()) {
         // Get the size of the pointee type.
-        uint64_t Size = TD->getABITypeSize(DestPointee);
+        uint64_t Size = TD->getTypePaddedSize(DestPointee);
 
         // Convert the constant to intptr type.
         APInt Offset = Cst->getValue();
@@ -8427,7 +8427,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
         // "inttoptr+GEP" instead of "add+intptr".
 
         // Get the size of the pointee type.
-        uint64_t Size = TD->getABITypeSize(DestPointee);
+        uint64_t Size = TD->getTypePaddedSize(DestPointee);
 
         // Convert the constant to intptr type.
         APInt Offset = Cst->getValue();
@@ -9492,7 +9492,7 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
   const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
   if (!SrcTy->isSized() || !DstTy->isSized())
     return false;
-  if (TD->getABITypeSize(SrcTy) != TD->getABITypeSize(DstTy))
+  if (TD->getTypePaddedSize(SrcTy) != TD->getTypePaddedSize(DstTy))
     return false;
   return true;
 }
@@ -10608,8 +10608,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
     const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
     const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
     if (isa<ArrayType>(SrcElTy) &&
-        TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
-        TD->getABITypeSize(ResElTy)) {
+        TD->getTypePaddedSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
+        TD->getTypePaddedSize(ResElTy)) {
       Value *Idx[2];
       Idx[0] = Constant::getNullValue(Type::Int32Ty);
       Idx[1] = GEP.getOperand(1);
@@ -10626,7 +10626,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
 
     if (isa<ArrayType>(SrcElTy) && ResElTy == Type::Int8Ty) {
       uint64_t ArrayEltSize =
-        TD->getABITypeSize(cast<ArrayType>(SrcElTy)->getElementType());
+        TD->getTypePaddedSize(cast<ArrayType>(SrcElTy)->getElementType());
 
       // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
       // allow either a mul, shift, or constant here.
@@ -10779,7 +10779,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
   // Note that we only do this for alloca's, because malloc should allocate and
   // return a unique pointer, even for a zero byte allocation.
   if (isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized() &&
-      TD->getABITypeSize(AI.getAllocatedType()) == 0)
+      TD->getTypePaddedSize(AI.getAllocatedType()) == 0)
     return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
 
   return 0;
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index fc61b9e9a1..c8f2ce2924 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -330,7 +330,7 @@ SCEVHandle LoopStrengthReduce::GetExpressionSCEV(Instruction *Exp) {
       Value *OpVal = getCastedVersionOf(opcode, *i);
       SCEVHandle Idx = SE->getSCEV(OpVal);
 
-      uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
+      uint64_t TypeSize = TD->getTypePaddedSize(GTI.getIndexedType());
       if (TypeSize != 1)
         Idx = SE->getMulExpr(Idx,
                              SE->getConstant(ConstantInt::get(UIntPtrTy,
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 5e9c5ea3d8..be9db9692f 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -104,7 +104,7 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
 
     // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
-    uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
+    uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType());
     Offset += Size*OpC->getSExtValue();
   }
 
@@ -511,7 +511,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
   if (!srcArraySize)
     return false;
 
-  uint64_t srcSize = TD.getABITypeSize(srcAlloca->getAllocatedType()) *
+  uint64_t srcSize = TD.getTypePaddedSize(srcAlloca->getAllocatedType()) *
     srcArraySize->getZExtValue();
 
   if (cpyLength->getZExtValue() < srcSize)
@@ -526,7 +526,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
     if (!destArraySize)
       return false;
 
-    uint64_t destSize = TD.getABITypeSize(A->getAllocatedType()) *
+    uint64_t destSize = TD.getTypePaddedSize(A->getAllocatedType()) *
       destArraySize->getZExtValue();
 
     if (destSize < srcSize)
@@ -538,7 +538,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
       return false;
 
     const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
-    uint64_t destSize = TD.getABITypeSize(StructTy);
+    uint64_t destSize = TD.getTypePaddedSize(StructTy);
 
     if (destSize < srcSize)
      return false;
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index d7b8b58ab0..18716b7e44 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -243,7 +243,7 @@ bool SROA::performScalarRepl(Function &F) {
          isa<ArrayType>(AI->getAllocatedType())) &&
         AI->getAllocatedType()->isSized() &&
         // Do not promote any struct whose size is larger than "128" bytes.
-        TD->getABITypeSize(AI->getAllocatedType()) < SRThreshold &&
+        TD->getTypePaddedSize(AI->getAllocatedType()) < SRThreshold &&
         // Do not promote any struct into more than "32" separate vars.
         getNumSAElements(AI->getAllocatedType()) < SRThreshold/4) {
       // Check that all of the users of the allocation are capable of being
@@ -562,7 +562,7 @@ void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
 
   // If not the whole aggregate, give up.
   if (Length->getZExtValue() !=
-      TD->getABITypeSize(AI->getType()->getElementType()))
+      TD->getTypePaddedSize(AI->getType()->getElementType()))
     return MarkUnsafe(Info);
 
   // We only know about memcpy/memset/memmove.
@@ -595,8 +595,8 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
       // cast a {i32,i32}* to i64* and store through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
-          TD->getABITypeSize(SI->getOperand(0)->getType()) ==
-          TD->getABITypeSize(AI->getType()->getElementType())) {
+          TD->getTypePaddedSize(SI->getOperand(0)->getType()) ==
+          TD->getTypePaddedSize(AI->getType()->getElementType())) {
         Info.isMemCpyDst = true;
         continue;
       }
@@ -607,8 +607,8 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
       // cast a {i32,i32}* to i64* and load through it.  This is similar to the
       // memcpy case and occurs in various "byval" cases and emulated memcpys.
       if (isa<IntegerType>(LI->getType()) &&
-          TD->getABITypeSize(LI->getType()) ==
-          TD->getABITypeSize(AI->getType()->getElementType())) {
+          TD->getTypePaddedSize(LI->getType()) ==
+          TD->getTypePaddedSize(AI->getType()->getElementType())) {
         Info.isMemCpySrc = true;
         continue;
       }
@@ -789,7 +789,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
       OtherElt = new BitCastInst(OtherElt, BytePtrTy,OtherElt->getNameStr(),
                                  MI);
 
-    unsigned EltSize = TD->getABITypeSize(EltTy);
+    unsigned EltSize = TD->getTypePaddedSize(EltTy);
 
     // Finally, insert the meminst for this element.
     if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
@@ -823,13 +823,13 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
   // and store the element value to the individual alloca.
   Value *SrcVal = SI->getOperand(0);
   const Type *AllocaEltTy = AI->getType()->getElementType();
-  uint64_t AllocaSizeBits = TD->getABITypeSizeInBits(AllocaEltTy);
+  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);
 
   // If this isn't a store of an integer to the whole alloca, it may be a store
   // to the first element.  Just ignore the store in this case and normal SROA
   // will handle it.
   if (!isa<IntegerType>(SrcVal->getType()) ||
-      TD->getABITypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
+      TD->getTypePaddedSizeInBits(SrcVal->getType()) != AllocaSizeBits)
     return;
 
   DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;
@@ -845,7 +845,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
       uint64_t Shift = Layout->getElementOffsetInBits(i);
 
       if (TD->isBigEndian())
-        Shift = AllocaSizeBits-Shift-TD->getABITypeSizeInBits(FieldTy);
+        Shift = AllocaSizeBits-Shift-TD->getTypePaddedSizeInBits(FieldTy);
 
       Value *EltVal = SrcVal;
       if (Shift) {
@@ -880,7 +880,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
   } else {
     const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
     const Type *ArrayEltTy = ATy->getElementType();
-    uint64_t ElementOffset = TD->getABITypeSizeInBits(ArrayEltTy);
+    uint64_t ElementOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
     uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
 
     uint64_t Shift;
@@ -935,13 +935,13 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
   // Extract each element out of the NewElts according to its structure offset
   // and form the result value.
   const Type *AllocaEltTy = AI->getType()->getElementType();
-  uint64_t AllocaSizeBits = TD->getABITypeSizeInBits(AllocaEltTy);
+  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);
 
   // If this isn't a load of the whole alloca to an integer, it may be a load
   // of the first element.  Just ignore the load in this case and normal SROA
   // will handle it.
   if (!isa<IntegerType>(LI->getType()) ||
-      TD->getABITypeSizeInBits(LI->getType()) != AllocaSizeBits)
+      TD->getTypePaddedSizeInBits(LI->getType()) != AllocaSizeBits)
     return;
 
   DOUT << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI;
@@ -954,7 +954,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
     Layout = TD->getStructLayout(EltSTy);
   } else {
     const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
-    ArrayEltBitOffset = TD->getABITypeSizeInBits(ArrayEltTy);
+    ArrayEltBitOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
   }
 
   Value *ResultVal = Constant::getNullValue(LI->getType());
@@ -1048,7 +1048,7 @@ static bool HasPadding(const Type *Ty, const TargetData &TD) {
   } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
     return HasPadding(VTy->getElementType(), TD);
   }
-  return TD.getTypeSizeInBits(Ty) != TD.getABITypeSizeInBits(Ty);
+  return TD.getTypeSizeInBits(Ty) != TD.getTypePaddedSizeInBits(Ty);
 }
 
 /// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of
@@ -1270,7 +1270,7 @@ const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
       // Check to see if this is stepping over an element: GEP Ptr, int C
       if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
         unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
-        unsigned ElSize = TD->getABITypeSize(PTy->getElementType());
+        unsigned ElSize = TD->getTypePaddedSize(PTy->getElementType());
         unsigned BitOffset = Idx*ElSize*8;
         if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;
 
@@ -1279,7 +1279,7 @@ const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
         if (SubElt == 0) return 0;
         if (SubElt != Type::VoidTy && SubElt->isInteger()) {
           const Type *NewTy =
-            getIntAtLeastAsBigAs(TD->getABITypeSizeInBits(SubElt)+BitOffset);
+            getIntAtLeastAsBigAs(TD->getTypePaddedSizeInBits(SubElt)+BitOffset);
           if (NewTy == 0 || MergeInType(NewTy, UsedType, *TD)) return 0;
           continue;
         }
@@ -1320,7 +1320,8 @@ const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
       } else {
         return 0;
       }
-      const Type *NTy = getIntAtLeastAsBigAs(TD->getABITypeSizeInBits(AggTy));
+      const Type *NTy =
+        getIntAtLeastAsBigAs(TD->getTypePaddedSizeInBits(AggTy));
       if (NTy == 0 || MergeInType(NTy, UsedType, *TD)) return 0;
       const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
       if (SubTy == 0) return 0;
@@ -1396,7 +1397,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
       const PointerType *AggPtrTy =
         cast<PointerType>(GEP->getOperand(0)->getType());
       unsigned AggSizeInBits =
-        TD->getABITypeSizeInBits(AggPtrTy->getElementType());
+        TD->getTypePaddedSizeInBits(AggPtrTy->getElementType());
 
       // Check to see if this is stepping over an element: GEP Ptr, int C
       unsigned NewOffset = Offset;
@@ -1417,7 +1418,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
         const Type *AggTy = AggPtrTy->getElementType();
         if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
           unsigned ElSizeBits =
-            TD->getABITypeSizeInBits(SeqTy->getElementType());
+            TD->getTypePaddedSizeInBits(SeqTy->getElementType());
           NewOffset += ElSizeBits*Idx;
         } else {
@@ -1471,7 +1472,7 @@ Value *SROA::ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
     // Otherwise it must be an element access.
     unsigned Elt = 0;
     if (Offset) {
-      unsigned EltSize = TD->getABITypeSizeInBits(VTy->getElementType());
+      unsigned EltSize = TD->getTypePaddedSizeInBits(VTy->getElementType());
       Elt = Offset/EltSize;
       Offset -= EltSize*Elt;
     }
@@ -1557,7 +1558,7 @@ Value *SROA::ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
       SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
     } else {
       // Must be an element insertion.
-      unsigned Elt = Offset/TD->getABITypeSizeInBits(PTy->getElementType());
+      unsigned Elt = Offset/TD->getTypePaddedSizeInBits(PTy->getElementType());
       SV = InsertElementInst::Create(Old, SV,
                                      ConstantInt::get(Type::Int32Ty, Elt),
                                      "tmp", SI);
diff --git a/lib/Transforms/Utils/LowerAllocations.cpp b/lib/Transforms/Utils/LowerAllocations.cpp
index 6c59926017..9a7f36692f 100644
--- a/lib/Transforms/Utils/LowerAllocations.cpp
+++ b/lib/Transforms/Utils/LowerAllocations.cpp
@@ -115,7 +115,8 @@ bool LowerAllocations::runOnBasicBlock(BasicBlock &BB) {
       // malloc(type) becomes sbyte *malloc(size)
      Value *MallocArg;
      if (LowerMallocArgToInteger)
-        MallocArg = ConstantInt::get(Type::Int64Ty, TD.getABITypeSize(AllocTy));
+        MallocArg = ConstantInt::get(Type::Int64Ty,
+                                     TD.getTypePaddedSize(AllocTy));
       else
         MallocArg = ConstantExpr::getSizeOf(AllocTy);
       MallocArg = ConstantExpr::getTruncOrBitCast(cast<Constant>(MallocArg),
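
One distinction the patch relies on, made explicit in the HasPadding hunk of ScalarReplAggregates.cpp, is between a type's precise bit width (getTypeSizeInBits) and its padded storage width (getTypePaddedSizeInBits). A minimal standalone sketch of that test follows; typeHasTailPadding is a hypothetical name, not an LLVM API.

  #include "llvm/Target/TargetData.h"
  #include "llvm/Type.h"
  using namespace llvm;

  // Sketch of the comparison HasPadding performs for scalar types: a type
  // has tail padding exactly when its precise bit width is smaller than
  // its padded width.  For example, i1 is 1 bit wide but occupies a full
  // byte, and x86_fp80 is 80 bits wide but is typically stored in 96 or
  // 128 bits.
  static bool typeHasTailPadding(const Type *Ty, const TargetData &TD) {
    return TD.getTypeSizeInBits(Ty) != TD.getTypePaddedSizeInBits(Ty);
  }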