53 files changed, 152 insertions, 152 deletions
diff --git a/bindings/ocaml/target/llvm_target.mli b/bindings/ocaml/target/llvm_target.mli
index 9378900a03..a82e1b684f 100644
--- a/bindings/ocaml/target/llvm_target.mli
+++ b/bindings/ocaml/target/llvm_target.mli
@@ -70,7 +70,7 @@ external size_in_bits : TargetData.t -> Llvm.lltype -> Int64.t
 external store_size : TargetData.t -> Llvm.lltype -> Int64.t = "llvm_store_size"
 
 (** Computes the ABI size of a type in bytes for a target.
-    See the method llvm::TargetData::getTypePaddedSize. *)
+    See the method llvm::TargetData::getTypeAllocSize. *)
 external abi_size : TargetData.t -> Llvm.lltype -> Int64.t = "llvm_abi_size"
 
 (** Computes the ABI alignment of a type in bytes for a target.
diff --git a/include/llvm-c/Target.h b/include/llvm-c/Target.h
index 2aa5bd9f76..5de5bc7857 100644
--- a/include/llvm-c/Target.h
+++ b/include/llvm-c/Target.h
@@ -70,7 +70,7 @@ unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef, LLVMTypeRef);
 unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef, LLVMTypeRef);
 
 /** Computes the ABI size of a type in bytes for a target.
-    See the method llvm::TargetData::getTypePaddedSize. */
+    See the method llvm::TargetData::getTypeAllocSize. */
 unsigned long long LLVMABISizeOfType(LLVMTargetDataRef, LLVMTypeRef);
 
 /** Computes the ABI alignment of a type in bytes for a target.
diff --git a/include/llvm/Target/TargetData.h b/include/llvm/Target/TargetData.h
index 8e5c4c34ea..71236c021f 100644
--- a/include/llvm/Target/TargetData.h
+++ b/include/llvm/Target/TargetData.h
@@ -157,8 +157,8 @@ public:
   ///
   /// Size examples:
   ///
-  /// Type        SizeInBits  StoreSizeInBits  PaddedSizeInBits[*]
-  /// ----        ----------  ---------------  ----------------
+  /// Type        SizeInBits  StoreSizeInBits  AllocSizeInBits[*]
+  /// ----        ----------  ---------------  ---------------
   ///  i1            1           8                8
   ///  i8            8           8                8
   ///  i19          19          24               32
@@ -169,7 +169,7 @@ public:
   ///  Double       64          64               64
   ///  X86_FP80     80          80               96
   ///
-  /// [*] The padded size depends on the alignment, and thus on the target.
+  /// [*] The alloc size depends on the alignment, and thus on the target.
   ///     These values are for x86-32 linux.
   ///
   /// getTypeSizeInBits - Return the number of bits necessary to hold the
@@ -190,21 +190,21 @@ public:
     return 8*getTypeStoreSize(Ty);
   }
 
-  /// getTypePaddedSize - Return the offset in bytes between successive objects
+  /// getTypeAllocSize - Return the offset in bytes between successive objects
   /// of the specified type, including alignment padding.  This is the amount
   /// that alloca reserves for this type.  For example, returns 12 or 16 for
   /// x86_fp80, depending on alignment.
-  uint64_t getTypePaddedSize(const Type* Ty) const {
+  uint64_t getTypeAllocSize(const Type* Ty) const {
     // Round up to the next alignment boundary.
     return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
   }
 
-  /// getTypePaddedSizeInBits - Return the offset in bits between successive
+  /// getTypeAllocSizeInBits - Return the offset in bits between successive
   /// objects of the specified type, including alignment padding; always a
   /// multiple of 8.  This is the amount that alloca reserves for this type.
   /// For example, returns 96 or 128 for x86_fp80, depending on alignment.
-  uint64_t getTypePaddedSizeInBits(const Type* Ty) const {
-    return 8*getTypePaddedSize(Ty);
+  uint64_t getTypeAllocSizeInBits(const Type* Ty) const {
+    return 8*getTypeAllocSize(Ty);
   }
 
   /// getABITypeAlignment - Return the minimum ABI-required alignment for the
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index fe71f04098..d958746359 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -123,7 +123,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,
   }
 
   if (AccessTy->isSized())
-    return TD.getTypePaddedSize(AccessTy) < Size;
+    return TD.getTypeAllocSize(AccessTy) < Size;
   return false;
 }
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 7c99325b2d..00b54413aa 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -77,7 +77,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
       Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
     } else {
       const SequentialType *SQT = cast<SequentialType>(*GTI);
-      Offset += TD.getTypePaddedSize(SQT->getElementType())*CI->getSExtValue();
+      Offset += TD.getTypeAllocSize(SQT->getElementType())*CI->getSExtValue();
     }
   }
   return true;
@@ -405,8 +405,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
       if (const ArrayType *AT = dyn_cast<ArrayType>(GVTy->getElementType())) {
         const Type *ElTy = AT->getElementType();
-        uint64_t PaddedSize = TD->getTypePaddedSize(ElTy);
-        APInt PSA(L->getValue().getBitWidth(), PaddedSize);
+        uint64_t AllocSize = TD->getTypeAllocSize(ElTy);
+        APInt PSA(L->getValue().getBitWidth(), AllocSize);
         if (ElTy == cast<PointerType>(DestTy)->getElementType() &&
             L->getValue().urem(PSA) == 0) {
           APInt ElemIdx = L->getValue().udiv(PSA);
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 3f2656240e..e65cdd218a 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -1961,7 +1961,7 @@ SCEVHandle ScalarEvolution::createNodeForGEP(User *GEP) {
                                   IntPtrTy);
       LocalOffset = getMulExpr(LocalOffset,
-                               getIntegerSCEV(TD->getTypePaddedSize(*GTI),
+                               getIntegerSCEV(TD->getTypeAllocSize(*GTI),
                                               IntPtrTy));
       TotalOffset = getAddExpr(TotalOffset, LocalOffset);
     }
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 20fa69ea24..c4f6faf612 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -459,7 +459,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
         const Type *IndexedTy = GTI.getIndexedType();
         if (!IndexedTy->isSized()) return;
         unsigned GEPOpiBits = Index->getType()->getPrimitiveSizeInBits();
-        uint64_t TypeSize = TD ? TD->getTypePaddedSize(IndexedTy) : 1;
+        uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
         LocalMask = APInt::getAllOnesValue(GEPOpiBits);
         LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
         ComputeMaskedBits(Index, LocalMask,
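The rename does not change the arithmetic: the alloc size is still the store size rounded up to the ABI alignment, exactly as RoundUpAlignment does in the TargetData.h hunk above. A minimal standalone C++ sketch of how the three sizes in the comment table relate, using the x86-32 values quoted there (the helper names are illustrative, not LLVM's API):

    #include <cassert>
    #include <cstdint>

    // Round Size up to the next multiple of Align (a power of two) --
    // the same rounding getTypeAllocSize performs via RoundUpAlignment.
    static uint64_t roundUpAlignment(uint64_t Size, unsigned Align) {
      return (Size + Align - 1) & ~uint64_t(Align - 1);
    }

    // Store size: the smallest whole number of bytes covering the bits.
    static uint64_t storeSizeFromBits(uint64_t SizeInBits) {
      return (SizeInBits + 7) / 8;
    }

    int main() {
      // x86_fp80 on x86-32 linux: 80 bits -> 10-byte store size, and a
      // 4-byte ABI alignment rounds that up to a 12-byte alloc size
      // (the 96 bits shown in the table).
      assert(storeSizeFromBits(80) == 10);
      assert(roundUpAlignment(storeSizeFromBits(80), 4) == 12);

      // i19: 3-byte store size -> 4-byte alloc size (24 -> 32 bits in
      // the table).
      assert(roundUpAlignment(storeSizeFromBits(19), 4) == 4);
      return 0;
    }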
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 0d9d77762b..45462da0d2 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -313,7 +313,7 @@ void AsmPrinter::EmitConstantPool(MachineConstantPool *MCP) {
       EmitZeros(NewOffset - Offset);
 
       const Type *Ty = CPE.getType();
-      Offset = NewOffset + TM.getTargetData()->getTypePaddedSize(Ty);
+      Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
 
       O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << '_'
         << CPI << ":\t\t\t\t\t";
@@ -889,12 +889,12 @@ void AsmPrinter::EmitConstantValueOnly(const Constant *CV) {
 
       // We can emit the pointer value into this slot if the slot is an
       // integer slot greater or equal to the size of the pointer.
-      if (TD->getTypePaddedSize(Ty) >= TD->getTypePaddedSize(Op->getType()))
+      if (TD->getTypeAllocSize(Ty) >= TD->getTypeAllocSize(Op->getType()))
         return EmitConstantValueOnly(Op);
 
       O << "((";
       EmitConstantValueOnly(Op);
-      APInt ptrMask = APInt::getAllOnesValue(TD->getTypePaddedSizeInBits(Ty));
+      APInt ptrMask = APInt::getAllOnesValue(TD->getTypeAllocSizeInBits(Ty));
 
       SmallString<40> S;
       ptrMask.toStringUnsigned(S);
@@ -992,14 +992,14 @@ void AsmPrinter::EmitGlobalConstantStruct(const ConstantStruct *CVS,
                                           unsigned AddrSpace) {
   // Print the fields in successive locations. Pad to align if needed!
   const TargetData *TD = TM.getTargetData();
-  unsigned Size = TD->getTypePaddedSize(CVS->getType());
+  unsigned Size = TD->getTypeAllocSize(CVS->getType());
   const StructLayout *cvsLayout = TD->getStructLayout(CVS->getType());
   uint64_t sizeSoFar = 0;
   for (unsigned i = 0, e = CVS->getNumOperands(); i != e; ++i) {
     const Constant* field = CVS->getOperand(i);
 
     // Check if padding is needed and insert one or more 0s.
-    uint64_t fieldSize = TD->getTypePaddedSize(field->getType());
+    uint64_t fieldSize = TD->getTypeAllocSize(field->getType());
     uint64_t padSize = ((i == e-1 ? Size : cvsLayout->getElementOffset(i+1))
                         - cvsLayout->getElementOffset(i)) - fieldSize;
     sizeSoFar += fieldSize + padSize;
@@ -1123,7 +1123,7 @@ void AsmPrinter::EmitGlobalConstantFP(const ConstantFP *CFP,
           << " long double most significant halfword";
       O << '\n';
     }
-    EmitZeros(TD->getTypePaddedSize(Type::X86_FP80Ty) -
+    EmitZeros(TD->getTypeAllocSize(Type::X86_FP80Ty) -
              TD->getTypeStoreSize(Type::X86_FP80Ty), AddrSpace);
     return;
   } else if (CFP->getType() == Type::PPC_FP128Ty) {
@@ -1228,7 +1228,7 @@ void AsmPrinter::EmitGlobalConstantLargeInt(const ConstantInt *CI,
 void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) {
   const TargetData *TD = TM.getTargetData();
   const Type *type = CV->getType();
-  unsigned Size = TD->getTypePaddedSize(type);
+  unsigned Size = TD->getTypeAllocSize(type);
 
   if (CV->isNullValue() || isa<UndefValue>(CV)) {
     EmitZeros(Size, AddrSpace);
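EmitGlobalConstantStruct above derives each field's trailing padding from the struct layout: the gap after field i is the distance to the next field's offset (or to the struct's total alloc size, for the last field) minus the field's own alloc size. A self-contained sketch of that padSize arithmetic, using made-up offsets for a hypothetical {i8, i32, i16} struct:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical layout: i8 at 0, i32 at 4, i16 at 8; the struct's
      // alloc size is 12 (rounded up to its 4-byte alignment).
      std::vector<uint64_t> Offsets    = {0, 4, 8};
      std::vector<uint64_t> FieldSizes = {1, 4, 2};
      const uint64_t Size = 12;  // alloc size of the whole struct

      for (std::size_t i = 0, e = Offsets.size(); i != e; ++i) {
        // Mirrors: padSize = ((i == e-1 ? Size : offset(i+1))
        //                     - offset(i)) - fieldSize
        uint64_t Next = (i == e - 1) ? Size : Offsets[i + 1];
        uint64_t PadSize = (Next - Offsets[i]) - FieldSizes[i];
        std::printf("field %zu: %llu pad byte(s)\n", i,
                    (unsigned long long)PadSize);  // prints 3, 0, 2
      }
      return 0;
    }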
diff --git a/lib/CodeGen/ELFWriter.cpp b/lib/CodeGen/ELFWriter.cpp
index ddc4e3588b..7cc1162352 100644
--- a/lib/CodeGen/ELFWriter.cpp
+++ b/lib/CodeGen/ELFWriter.cpp
@@ -284,7 +284,7 @@ void ELFWriter::EmitGlobal(GlobalVariable *GV) {
 
   unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);
   unsigned Size  =
-    TM.getTargetData()->getTypePaddedSize(GV->getType()->getElementType());
+    TM.getTargetData()->getTypeAllocSize(GV->getType()->getElementType());
 
   // If this global has a zero initializer, it is part of the .bss or common
   // section.
diff --git a/lib/CodeGen/MachOWriter.cpp b/lib/CodeGen/MachOWriter.cpp
index d2e87917af..c8787987a3 100644
--- a/lib/CodeGen/MachOWriter.cpp
+++ b/lib/CodeGen/MachOWriter.cpp
@@ -281,7 +281,7 @@ void MachOCodeEmitter::emitConstantPool(MachineConstantPool *MCP) {
   // "giant object for PIC" optimization.
   for (unsigned i = 0, e = CP.size(); i != e; ++i) {
     const Type *Ty = CP[i].getType();
-    unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+    unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
 
     MachOWriter::MachOSection *Sec = MOW.getConstSection(CP[i].Val.ConstVal);
     OutputBuffer SecDataOut(Sec->SectionData, is64Bit, isLittleEndian);
@@ -355,7 +355,7 @@ MachOWriter::~MachOWriter() {
 
 void MachOWriter::AddSymbolToSection(MachOSection *Sec, GlobalVariable *GV) {
   const Type *Ty = GV->getType()->getElementType();
-  unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+  unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
   unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);
 
   // Reserve space in the .bss section for this symbol while maintaining the
@@ -400,7 +400,7 @@ void MachOWriter::AddSymbolToSection(MachOSection *Sec, GlobalVariable *GV) {
 
 void MachOWriter::EmitGlobal(GlobalVariable *GV) {
   const Type *Ty = GV->getType()->getElementType();
-  unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+  unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
   bool NoInit = !GV->hasInitializer();
 
   // If this global has a zero initializer, it is part of the .bss or common
@@ -825,7 +825,7 @@ void MachOWriter::InitMem(const Constant *C, void *Addr, intptr_t Offset,
       continue;
     } else if (const ConstantVector *CP = dyn_cast<ConstantVector>(PC)) {
       unsigned ElementSize =
-        TD->getTypePaddedSize(CP->getType()->getElementType());
+        TD->getTypeAllocSize(CP->getType()->getElementType());
       for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
         WorkList.push_back(CPair(CP->getOperand(i), PA+i*ElementSize));
     } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(PC)) {
@@ -926,10 +926,10 @@ void MachOWriter::InitMem(const Constant *C, void *Addr, intptr_t Offset,
         abort();
       }
     } else if (isa<ConstantAggregateZero>(PC)) {
-      memset((void*)PA, 0, (size_t)TD->getTypePaddedSize(PC->getType()));
+      memset((void*)PA, 0, (size_t)TD->getTypeAllocSize(PC->getType()));
     } else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(PC)) {
       unsigned ElementSize =
-        TD->getTypePaddedSize(CPA->getType()->getElementType());
+        TD->getTypeAllocSize(CPA->getType()->getElementType());
       for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
         WorkList.push_back(CPair(CPA->getOperand(i), PA+i*ElementSize));
     } else if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(PC)) {
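InitMem above initializes aggregates by pushing each element onto a worklist at address PA + i*ElementSize; because the stride is the alloc size rather than the store size, every element, tail padding included, lands at a correctly aligned address. A rough standalone model of that placement, assuming a 12-byte element (e.g. x86_fp80 at 4-byte alignment):

    #include <cstring>

    int main() {
      const unsigned NumElements = 3;
      const unsigned ElementSize = 12;  // alloc size, not store size
      unsigned char Buffer[NumElements * ElementSize];

      for (unsigned i = 0; i != NumElements; ++i)
        // Element i starts at Buffer + i*ElementSize, like the
        // PA + i*ElementSize addresses pushed onto the worklist above.
        std::memset(Buffer + i * ElementSize, 0, ElementSize);
      return 0;
    }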
diff --git a/lib/CodeGen/MachOWriter.h b/lib/CodeGen/MachOWriter.h
index aacaf2c9e0..20a4084638 100644
--- a/lib/CodeGen/MachOWriter.h
+++ b/lib/CodeGen/MachOWriter.h
@@ -468,7 +468,7 @@ namespace llvm {
       const Type *Ty = C->getType();
       if (Ty->isPrimitiveType() || Ty->isInteger()) {
-        unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+        unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
         switch(Size) {
         default: break; // Fall through to __TEXT,__const
         case 4:
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 10b7576a69..8a1dc5d937 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5694,7 +5694,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
         // Get the offsets to the 0 and 1 element of the array so that we can
         // select between them.
         SDValue Zero = DAG.getIntPtrConstant(0);
-        unsigned EltSize = (unsigned)TD.getTypePaddedSize(Elts[0]->getType());
+        unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
         SDValue One = DAG.getIntPtrConstant(EltSize);
 
         SDValue Cond = DAG.getSetCC(DL,
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 22051867a0..367cf4cd5c 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -285,7 +285,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
         if (CI->getZExtValue() == 0) continue;
         uint64_t Offs =
-          TD.getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
         N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
         if (N == 0)
           // Unhandled operand. Halt "fast" selection and bail.
@@ -294,7 +294,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
       }
 
       // N = N + Idx * ElementSize;
-      uint64_t ElementSize = TD.getTypePaddedSize(Ty);
+      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
       unsigned IdxN = getRegForGEPIndex(Idx);
       if (IdxN == 0)
         // Unhandled operand. Halt "fast" selection and bail.
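Both FastISel here and SelectionDAGBuild below fold a constant GEP index into a byte offset of index times the element's alloc size, with the index sign-extended so negative indices step backwards. In miniature, with assumed values:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t ElementSize = 12;  // assumed alloc size of the element
      const int64_t Index = -2;        // sign-extended constant index

      // Mirrors: Offs = TD.getTypeAllocSize(Ty) * CI->getSExtValue();
      int64_t Offs = ElementSize * Index;
      assert(Offs == -24);  // two elements back from the base pointer
      return 0;
    }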
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 0787f933be..1a3370f0a0 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3638,7 +3638,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
     // Increment the pointer, VAList, to the next vaarg
     Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
                        DAG.getConstant(TLI.getTargetData()->
-                                       getTypePaddedSize(VT.getTypeForMVT()),
+                                       getTypeAllocSize(VT.getTypeForMVT()),
                                        TLI.getPointerTy()));
     // Store the incremented VAList to the legalized pointer
     Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0);
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
index 6e38590e24..f1da2583f3 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
@@ -295,7 +295,7 @@ void ScheduleDAGSDNodes::AddOperand(MachineInstr *MI, SDValue Op,
       Align = TM.getTargetData()->getPrefTypeAlignment(Type);
       if (Align == 0) {
         // Alignment of vector types.  FIXME!
-        Align = TM.getTargetData()->getTypePaddedSize(Type);
+        Align = TM.getTargetData()->getTypeAllocSize(Type);
       }
     }
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
index 1562c13a3f..b340d0c971 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
@@ -128,7 +128,7 @@ static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
   // Given an array type, recursively traverse the elements.
   if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     const Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = TLI.getTargetData()->getTypePaddedSize(EltTy);
+    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                       StartingOffset + i * EltSize);
@@ -294,7 +294,7 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
       if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
         const Type *Ty = AI->getAllocatedType();
-        uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
         unsigned Align =
           std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                    AI->getAlignment());
@@ -2700,7 +2700,7 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs =
-          TD->getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+          TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        SDValue OffsVal;
        unsigned PtrBits = TLI.getPointerTy().getSizeInBits();
        if (PtrBits < 64) {
@@ -2715,7 +2715,7 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
      }
 
      // N = N + Idx * ElementSize;
-      uint64_t ElementSize = TD->getTypePaddedSize(Ty);
+      uint64_t ElementSize = TD->getTypeAllocSize(Ty);
      SDValue IdxN = getValue(Idx);
 
      // If the index is smaller or larger than intptr_t, truncate or extend
@@ -2756,7 +2756,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
     return;   // getValue will auto-populate this.
 
   const Type *Ty = I.getAllocatedType();
-  uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+  uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
   unsigned Align =
     std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
              I.getAlignment());
@@ -5199,7 +5199,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
         // Otherwise, create a stack slot and emit a store to it before the
         // asm.
         const Type *Ty = OpVal->getType();
-        uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
         unsigned Align  = TLI.getTargetData
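The VAARG expansion in LegalizeDAG.cpp above advances the va_list pointer by the alloc size of the argument type, so the next argument begins at a properly padded slot. A toy model of that increment (the function name and slot sizes here are illustrative):

    #include <cstdint>

    // New pointer = old pointer + alloc size of the argument type,
    // matching the ISD::ADD node built in LegalizeOp.
    static uintptr_t advanceVAList(uintptr_t VAList, uint64_t TypeAllocSize) {
      return VAList + TypeAllocSize;
    }

    int main() {
      uintptr_t VAList = 0x1000;
      VAList = advanceVAList(VAList, 8);  // e.g. a double: 8-byte slot
      VAList = advanceVAList(VAList, 4);  // e.g. an i32: 4-byte slot
      return VAList == 0x100C ? 0 : 1;    // exit 0 on the expected address
    }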