author     Duncan Sands <baldrick@free.fr>    2009-01-12 20:38:59 +0000
committer  Duncan Sands <baldrick@free.fr>    2009-01-12 20:38:59 +0000
commit     ceb4d1aecb9deffe59b3dcdc9a783ffde8477be9 (patch)
tree       b81070777ea57a00082bbc345c47a9499d77d24d /lib
parent     ccca7fe6a30ec536de3823c0867806c1f86b2212 (diff)
Rename getABITypeSize to getTypePaddedSize, as
suggested by Chris.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@62099 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
48 files changed, 145 insertions, 141 deletions
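
The patch is a pure rename: every call site switches from getABITypeSize/getABITypeSizeInBits to getTypePaddedSize/getTypePaddedSizeInBits, with no change in semantics (the result is still the type's allocation size in bytes, including tail padding, as opposed to getTypeStoreSize). As a minimal sketch of what an updated caller looks like, the helper below is hypothetical and is shown only to make the before/after shape concrete:

```cpp
// Hypothetical caller, shown only to illustrate the renamed TargetData API.
// Before this commit: TD.getABITypeSize(EltTy)
// After this commit:  TD.getTypePaddedSize(EltTy)
#include "llvm/DerivedTypes.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

// Bytes one element of ArrTy occupies in memory, including tail padding.
static uint64_t getElementAllocSize(const TargetData &TD,
                                    const ArrayType *ArrTy) {
  const Type *EltTy = ArrTy->getElementType();
  return TD.getTypePaddedSize(EltTy);  // formerly TD.getABITypeSize(EltTy)
}
```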
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 92cff8ea96..aac9e2d95c 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -195,7 +195,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,
   }
 
   if (AccessTy->isSized())
-    return TD.getABITypeSize(AccessTy) < Size;
+    return TD.getTypePaddedSize(AccessTy) < Size;
   return false;
 }
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index ea559c1010..d4457b3031 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -76,7 +76,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
       Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
     } else {
       const SequentialType *SQT = cast<SequentialType>(*GTI);
-      Offset += TD.getABITypeSize(SQT->getElementType())*CI->getSExtValue();
+      Offset += TD.getTypePaddedSize(SQT->getElementType())*CI->getSExtValue();
     }
   }
   return true;
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index b8aac9deb7..120dcd8ad2 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -459,7 +459,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
         const Type *IndexedTy = GTI.getIndexedType();
         if (!IndexedTy->isSized()) return;
         unsigned GEPOpiBits = Index->getType()->getPrimitiveSizeInBits();
-        uint64_t TypeSize = TD ? TD->getABITypeSize(IndexedTy) : 1;
+        uint64_t TypeSize = TD ? TD->getTypePaddedSize(IndexedTy) : 1;
         LocalMask = APInt::getAllOnesValue(GEPOpiBits);
         LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
         ComputeMaskedBits(Index, LocalMask,
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 49966f4bb9..47a19dd893 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -298,7 +298,7 @@ void AsmPrinter::EmitConstantPool(MachineConstantPool *MCP) {
       // Emit inter-object padding for alignment.
       if (J != E) {
         const Type *Ty = Entry.getType();
-        unsigned EntSize = TM.getTargetData()->getABITypeSize(Ty);
+        unsigned EntSize = TM.getTargetData()->getTypePaddedSize(Ty);
         unsigned ValEnd = Entry.getOffset() + EntSize;
         EmitZeros(J->second.first.getOffset()-ValEnd);
       }
@@ -857,12 +857,12 @@ void AsmPrinter::EmitConstantValueOnly(const Constant *CV) {
 
     // We can emit the pointer value into this slot if the slot is an
     // integer slot greater or equal to the size of the pointer.
-    if (TD->getABITypeSize(Ty) >= TD->getABITypeSize(Op->getType()))
+    if (TD->getTypePaddedSize(Ty) >= TD->getTypePaddedSize(Op->getType()))
       return EmitConstantValueOnly(Op);
 
     O << "((";
     EmitConstantValueOnly(Op);
-    APInt ptrMask = APInt::getAllOnesValue(TD->getABITypeSizeInBits(Ty));
+    APInt ptrMask = APInt::getAllOnesValue(TD->getTypePaddedSizeInBits(Ty));
 
     SmallString<40> S;
     ptrMask.toStringUnsigned(S);
@@ -958,14 +958,14 @@ void AsmPrinter::EmitGlobalConstantVector(const ConstantVector *CP) {
 void AsmPrinter::EmitGlobalConstantStruct(const ConstantStruct *CVS) {
   // Print the fields in successive locations. Pad to align if needed!
   const TargetData *TD = TM.getTargetData();
-  unsigned Size = TD->getABITypeSize(CVS->getType());
+  unsigned Size = TD->getTypePaddedSize(CVS->getType());
   const StructLayout *cvsLayout = TD->getStructLayout(CVS->getType());
   uint64_t sizeSoFar = 0;
   for (unsigned i = 0, e = CVS->getNumOperands(); i != e; ++i) {
     const Constant* field = CVS->getOperand(i);
 
     // Check if padding is needed and insert one or more 0s.
-    uint64_t fieldSize = TD->getABITypeSize(field->getType());
+    uint64_t fieldSize = TD->getTypePaddedSize(field->getType());
     uint64_t padSize = ((i == e-1 ? Size : cvsLayout->getElementOffset(i+1))
                         - cvsLayout->getElementOffset(i)) - fieldSize;
     sizeSoFar += fieldSize + padSize;
@@ -1059,7 +1059,7 @@ void AsmPrinter::EmitGlobalConstantFP(const ConstantFP *CFP) {
           << '\t' << TAI->getCommentString()
           << " long double most significant halfword\n";
       }
-      EmitZeros(TD->getABITypeSize(Type::X86_FP80Ty) -
+      EmitZeros(TD->getTypePaddedSize(Type::X86_FP80Ty) -
                 TD->getTypeStoreSize(Type::X86_FP80Ty));
       return;
     } else if (CFP->getType() == Type::PPC_FP128Ty) {
@@ -1139,7 +1139,7 @@ void AsmPrinter::EmitGlobalConstantLargeInt(const ConstantInt *CI) {
 void AsmPrinter::EmitGlobalConstant(const Constant *CV) {
   const TargetData *TD = TM.getTargetData();
   const Type *type = CV->getType();
-  unsigned Size = TD->getABITypeSize(type);
+  unsigned Size = TD->getTypePaddedSize(type);
 
   if (CV->isNullValue() || isa<UndefValue>(CV)) {
     EmitZeros(Size);
diff --git a/lib/CodeGen/ELFWriter.cpp b/lib/CodeGen/ELFWriter.cpp
index f0da37aca3..5d2fca134e 100644
--- a/lib/CodeGen/ELFWriter.cpp
+++ b/lib/CodeGen/ELFWriter.cpp
@@ -276,7 +276,7 @@ void ELFWriter::EmitGlobal(GlobalVariable *GV) {
 
   unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);
   unsigned Size  =
-    TM.getTargetData()->getABITypeSize(GV->getType()->getElementType());
+    TM.getTargetData()->getTypePaddedSize(GV->getType()->getElementType());
 
   // If this global has a zero initializer, it is part of the .bss or common
   // section.
diff --git a/lib/CodeGen/MachOWriter.cpp b/lib/CodeGen/MachOWriter.cpp
index bb15a22b62..ae1f0d4b04 100644
--- a/lib/CodeGen/MachOWriter.cpp
+++ b/lib/CodeGen/MachOWriter.cpp
@@ -276,7 +276,7 @@ void MachOCodeEmitter::emitConstantPool(MachineConstantPool *MCP) {
   // "giant object for PIC" optimization.
   for (unsigned i = 0, e = CP.size(); i != e; ++i) {
     const Type *Ty = CP[i].getType();
-    unsigned Size = TM.getTargetData()->getABITypeSize(Ty);
+    unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
 
     MachOWriter::MachOSection *Sec = MOW.getConstSection(CP[i].Val.ConstVal);
     OutputBuffer SecDataOut(Sec->SectionData, is64Bit, isLittleEndian);
@@ -350,7 +350,7 @@ MachOWriter::~MachOWriter() {
 
 void MachOWriter::AddSymbolToSection(MachOSection *Sec, GlobalVariable *GV) {
   const Type *Ty = GV->getType()->getElementType();
-  unsigned Size = TM.getTargetData()->getABITypeSize(Ty);
+  unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
   unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);
 
   // Reserve space in the .bss section for this symbol while maintaining the
@@ -395,7 +395,7 @@ void MachOWriter::AddSymbolToSection(MachOSection *Sec, GlobalVariable *GV) {
 
 void MachOWriter::EmitGlobal(GlobalVariable *GV) {
   const Type *Ty = GV->getType()->getElementType();
-  unsigned Size = TM.getTargetData()->getABITypeSize(Ty);
+  unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
   bool NoInit = !GV->hasInitializer();
 
   // If this global has a zero initializer, it is part of the .bss or common
@@ -820,7 +820,7 @@ void MachOWriter::InitMem(const Constant *C, void *Addr, intptr_t Offset,
       continue;
     } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(PC)) {
       unsigned ElementSize =
-        TD->getABITypeSize(CP->getType()->getElementType());
+        TD->getTypePaddedSize(CP->getType()->getElementType());
       for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
         WorkList.push_back(CPair(CP->getOperand(i), PA+i*ElementSize));
     } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(PC)) {
@@ -921,10 +921,10 @@ void MachOWriter::InitMem(const Constant *C, void *Addr, intptr_t Offset,
         abort();
       }
     } else if (isa<ConstantAggregateZero>(PC)) {
-      memset((void*)PA, 0, (size_t)TD->getABITypeSize(PC->getType()));
+      memset((void*)PA, 0, (size_t)TD->getTypePaddedSize(PC->getType()));
     } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(PC)) {
       unsigned ElementSize =
-        TD->getABITypeSize(CPA->getType()->getElementType());
+        TD->getTypePaddedSize(CPA->getType()->getElementType());
       for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
         WorkList.push_back(CPair(CPA->getOperand(i), PA+i*ElementSize));
     } else if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(PC)) {
diff --git a/lib/CodeGen/MachOWriter.h b/lib/CodeGen/MachOWriter.h
index e2b6fa8dd4..aacaf2c9e0 100644
--- a/lib/CodeGen/MachOWriter.h
+++ b/lib/CodeGen/MachOWriter.h
@@ -468,7 +468,7 @@ namespace llvm {
       const Type *Ty = C->getType();
       if (Ty->isPrimitiveType() || Ty->isInteger()) {
-        unsigned Size = TM.getTargetData()->getABITypeSize(Ty);
+        unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
        switch(Size) {
         default: break; // Fall through to __TEXT,__const
         case 4:
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index b14c19d5b4..8bae7bbb92 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -503,7 +503,7 @@ unsigned MachineConstantPool::getConstantPoolIndex(Constant *C,
   unsigned Offset = 0;
   if (!Constants.empty()) {
     Offset = Constants.back().getOffset();
-    Offset += TD->getABITypeSize(Constants.back().getType());
+    Offset += TD->getTypePaddedSize(Constants.back().getType());
     Offset = (Offset+AlignMask)&~AlignMask;
   }
 
@@ -527,7 +527,7 @@ unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
   unsigned Offset = 0;
   if (!Constants.empty()) {
     Offset = Constants.back().getOffset();
-    Offset += TD->getABITypeSize(Constants.back().getType());
+    Offset += TD->getTypePaddedSize(Constants.back().getType());
     Offset = (Offset+AlignMask)&~AlignMask;
   }
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 3d0348d5d1..bcba15192c 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -273,7 +273,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
         if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
           if (CI->getZExtValue() == 0) continue;
           uint64_t Offs =
-            TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+            TD.getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
           N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
           if (N == 0)
             // Unhandled operand. Halt "fast" selection and bail.
@@ -282,7 +282,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
         }
 
         // N = N + Idx * ElementSize;
-        uint64_t ElementSize = TD.getABITypeSize(Ty);
+        uint64_t ElementSize = TD.getTypePaddedSize(Ty);
         unsigned IdxN = getRegForGEPIndex(Idx);
         if (IdxN == 0)
           // Unhandled operand. Halt "fast" selection and bail.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 97b526f4fe..c535f3a082 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3572,8 +3572,9 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
     SDValue VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0);
     // Increment the pointer, VAList, to the next vaarg
     Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList,
-                       DAG.getConstant(TLI.getTargetData()->getABITypeSize(VT.getTypeForMVT()),
-                                       TLI.getPointerTy()));
+                       DAG.getConstant(TLI.getTargetData()->
+                                       getTypePaddedSize(VT.getTypeForMVT()),
+                                       TLI.getPointerTy()));
     // Store the incremented VAList to the legalized pointer
     Tmp3 = DAG.getStore(VAList.getValue(1), Tmp3, Tmp2, V, 0);
     // Load the actual argument out of the pointer VAList
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
index d617965158..e93d4a64f3 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
@@ -296,7 +296,7 @@ void ScheduleDAGSDNodes::AddOperand(MachineInstr *MI, SDValue Op,
       Align = TM.getTargetData()->getPreferredTypeAlignmentShift(Type);
      if (Align == 0) {
         // Alignment of vector types.  FIXME!
-        Align = TM.getTargetData()->getABITypeSize(Type);
+        Align = TM.getTargetData()->getTypePaddedSize(Type);
         Align = Log2_64(Align);
       }
     }
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
index c8985b47f0..294acd8370 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
@@ -125,7 +125,7 @@ static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
   // Given an array type, recursively traverse the elements.
   if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     const Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = TLI.getTargetData()->getABITypeSize(EltTy);
+    uint64_t EltSize = TLI.getTargetData()->getTypePaddedSize(EltTy);
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                       StartingOffset + i * EltSize);
@@ -288,7 +288,7 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
       if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
         const Type *Ty = AI->getAllocatedType();
-        uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
         unsigned Align =
           std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                    AI->getAlignment());
@@ -2603,14 +2603,14 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
         if (CI->getZExtValue() == 0) continue;
         uint64_t Offs =
-          TD->getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+          TD->getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
         N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                         DAG.getIntPtrConstant(Offs));
         continue;
       }
 
       // N = N + Idx * ElementSize;
-      uint64_t ElementSize = TD->getABITypeSize(Ty);
+      uint64_t ElementSize = TD->getTypePaddedSize(Ty);
       SDValue IdxN = getValue(Idx);
 
       // If the index is smaller or larger than intptr_t, truncate or extend
@@ -2646,7 +2646,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
     return;   // getValue will auto-populate this.
 
   const Type *Ty = I.getAllocatedType();
-  uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
+  uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
   unsigned Align =
     std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
              I.getAlignment());
@@ -4951,7 +4951,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
        // Otherwise, create a stack slot and emit a store to it before the
         // asm.
         const Type *Ty = OpVal->getType();
-        uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
         unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
         MachineFunction &MF = DAG.getMachineFunction();
         int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
@@ -5236,7 +5236,7 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) {
     Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
 
   // Scale the source by the type size.
-  uint64_t ElementSize = TD->getABITypeSize(I.getType()->getElementType());
+  uint64_t ElementSize = TD->getTypePaddedSize(I.getType()->getElementType());
   Src = DAG.getNode(ISD::MUL, Src.getValueType(), Src,
                     DAG.getIntPtrConstant(ElementSize));
 
@@ -5337,7 +5337,7 @@ void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
         const PointerType *Ty = cast<PointerType>(I->getType());
         const Type *ElementTy = Ty->getElementType();
         unsigned FrameAlign = getByValTypeAlignment(ElementTy);
-        unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
+        unsigned FrameSize  = getTargetData()->getTypePaddedSize(ElementTy);
         // For ByVal, alignment should be passed from FE.  BE will guess if
         // this info is not there but there are cases it cannot get right.
         if (F.getParamAlignment(j))
@@ -5470,7 +5470,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
         const PointerType *Ty = cast<PointerType>(Args[i].Ty);
         const Type *ElementTy = Ty->getElementType();
         unsigned FrameAlign = getByValTypeAlignment(ElementTy);
-        unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
+        unsigned FrameSize  = getTargetData()->getTypePaddedSize(ElementTy);
         // For ByVal, alignment should come from FE.  BE will guess if this
         // info is not there but there are cases it cannot get right.
         if (Args[i].Alignment)
diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp
index 5a129c0d9d..7c4d22df8b 100644
--- a/lib/CodeGen/StackProtector.cpp
+++ b/lib/CodeGen/StackProtector.cpp
@@ -114,7 +114,7 @@ bool StackProtector::RequiresStackProtector() const {
      if (const ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType()))
         // If an array has more than SSPBufferSize bytes of allocated space,
         // then we emit stack protectors.
-        if (SSPBufferSize <= TD->getABITypeSize(AT))
+        if (SSPBufferSize <= TD->getTypePaddedSize(AT))
          return true;
     }
   }
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
index 1834655712..9c7592841d 100644
--- a/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -54,7 +54,7 @@ ExecutionEngine::~ExecutionEngine() {
 
 char* ExecutionEngine::getMemoryForGV(const GlobalVariable* GV) {
   const Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)getTargetData()->getABITypeSize(ElTy);
+  size_t GVSize = (size_t)getTargetData()->getTypePaddedSize(ElTy);
   return new char[GVSize];
 }
 
@@ -845,16 +845,16 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
     return;
   } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
     unsigned ElementSize =
-      getTargetData()->getABITypeSize(CP->getType()->getElementType());
+      getTargetData()->getTypePaddedSize(CP->getType()->getElementType());
     for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
       InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
     return;
   } else if (isa<ConstantAggregateZero>(Init)) {
-    memset(Addr, 0, (size_t)getTargetData()->getABITypeSize(Init->getType()));
+    memset(Addr, 0, (size_t)getTargetData()->getTypePaddedSize(Init->getType()));
     return;
   } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
     unsigned ElementSize =
-      getTargetData()->getABITypeSize(CPA->getType()->getElementType());
+      getTargetData()->getTypePaddedSize(CPA->getType()->getElementType());
     for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
       InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
     return;
@@ -1001,7 +1001,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
     InitializeMemory(GV->getInitializer(), GA);
 
   const Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)getTargetData()->getABITypeSize(ElTy);
+  size_t GVSize = (size_t)getTargetData()->getTypePaddedSize(ElTy);
   NumInitBytes += (unsigned)GVSize;
   ++NumGlobals;
 }
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index 639415d7fe..872a40b6ae 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -750,7 +750,7 @@ void Interpreter::visitAllocationInst(AllocationInst &I) {
   unsigned NumElements =
     getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
 
-  unsigned TypeSize = (size_t)TD.getABITypeSize(Ty);
+  unsigned TypeSize = (size_t)TD.getTypePaddedSize(Ty);
 
   // Avoid malloc-ing zero bytes, use max()...
   unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
@@ -810,7 +810,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
         assert(BitWidth == 64 && "Invalid index type for getelementptr");
         Idx = (int64_t)IdxGV.IntVal.getZExtValue();
       }
-      Total += TD.getABITypeSize(ST->getElementType())*Idx;
+      Total += TD.getTypePaddedSize(ST->getElementType())*Idx;
     }
   }
diff --git a/lib/ExecutionEngine/JIT/JIT.cpp b/lib/ExecutionEngine/JIT/JIT.cpp
index 6cd1c50978..1db3662700 100644
--- a/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/lib/ExecutionEngine/JIT/JIT.cpp
@@ -562,7 +562,7 @@ void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) {
     // emit it into memory.  It goes in the same array as the generated
     // code, jump tables, etc.
     const Type *GlobalType = GV->getType()->getElementType();
-    size_t S = getTargetData()->getABITypeSize(GlobalType);
+    size_t S = getTargetData()->getTypePaddedSize(GlobalType);
     size_t A = getTargetData()->getPreferredAlignment(GV);
     if (GV->isThreadLocal()) {
       MutexGuard locked(lock);
@@ -617,7 +617,7 @@ void *JIT::recompileAndRelinkFunction(Function *F) {
 ///
 char* JIT::getMemoryForGV(const GlobalVariable* GV) {
   const Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)getTargetData()->getABITypeSize(ElTy);
+  size_t GVSize = (size_t)getTargetData()->getTypePaddedSize(ElTy);
   if (GV->isThreadLocal()) {
     MutexGuard locked(lock);
     return TJI.allocateThreadLocalMemory(GVSize);
diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp
index 3f3f681f61..1067c2287f 100644
--- a/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -659,7 +659,7 @@ static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP) {
   unsigned Size = CPE.Offset;
   const Type *Ty = CPE.isMachineConstantPoolEntry()
     ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
-  Size += TheJIT->getTargetData()->getABITypeSize(Ty);
+  Size += TheJIT->getTargetData()->getTypePaddedSize(Ty);
   return Size;
 }
 
@@ -687,7 +687,7 @@ static uintptr_t RoundUpToAlign(uintptr_t Size, unsigned Alignment) {
 
 unsigned JITEmitter::addSizeOfGlobal(const GlobalVariable *GV, unsigned Size) {
   const Type *ElTy = GV->getType()->getElementType();
-  size_t GVSize = (size_t)TheJIT->getTargetData()->getABITypeSize(ElTy);
+  size_t GVSize = (size_t)TheJIT->getTargetData()->getTypePaddedSize(ElTy);
   size_t GVAlign = (size_t)TheJIT->getTargetData()->getPreferredAlignment(GV);
   DOUT << "JIT: Adding in size " << GVSize << " alignment " << GVAlign;
@@ -1080,7 +1080,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
     unsigned Size = CPE.Offset;
     const Type *Ty = CPE.isMachineConstantPoolEntry()
       ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
-    Size += TheJIT->getTargetData()->getABITypeSize(Ty);
+    Size += TheJIT->getTargetData()->getTypePaddedSize(Ty);