Diffstat (limited to 'lib')
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp | 2
-rw-r--r--  lib/Analysis/ConstantFolding.cpp | 6
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp | 2
-rw-r--r--  lib/Analysis/ValueTracking.cpp | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 14
-rw-r--r--  lib/CodeGen/ELFWriter.cpp | 2
-rw-r--r--  lib/CodeGen/MachOWriter.cpp | 12
-rw-r--r--  lib/CodeGen/MachOWriter.h | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/FastISel.cpp | 4
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp | 18
-rw-r--r--  lib/CodeGen/StackProtector.cpp | 2
-rw-r--r--  lib/ExecutionEngine/ExecutionEngine.cpp | 10
-rw-r--r--  lib/ExecutionEngine/Interpreter/Execution.cpp | 4
-rw-r--r--  lib/ExecutionEngine/JIT/JIT.cpp | 4
-rw-r--r--  lib/ExecutionEngine/JIT/JITEmitter.cpp | 6
-rw-r--r--  lib/Target/ARM/ARMConstantIslandPass.cpp | 2
-rw-r--r--  lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp | 2
-rw-r--r--  lib/Target/Alpha/AsmPrinter/AlphaAsmPrinter.cpp | 2
-rw-r--r--  lib/Target/CBackend/CBackend.cpp | 4
-rw-r--r--  lib/Target/CellSPU/AsmPrinter/SPUAsmPrinter.cpp | 2
-rw-r--r--  lib/Target/DarwinTargetAsmInfo.cpp | 4
-rw-r--r--  lib/Target/ELFTargetAsmInfo.cpp | 4
-rw-r--r--  lib/Target/IA64/AsmPrinter/IA64AsmPrinter.cpp | 2
-rw-r--r--  lib/Target/MSIL/MSILWriter.cpp | 20
-rw-r--r--  lib/Target/Mips/AsmPrinter/MipsAsmPrinter.cpp | 2
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp | 4
-rw-r--r--  lib/Target/Mips/MipsTargetAsmInfo.cpp | 2
-rw-r--r--  lib/Target/PIC16/PIC16AsmPrinter.cpp | 8
-rw-r--r--  lib/Target/PIC16/PIC16TargetAsmInfo.cpp | 4
-rw-r--r--  lib/Target/PowerPC/AsmPrinter/PPCAsmPrinter.cpp | 4
-rw-r--r--  lib/Target/Sparc/AsmPrinter/SparcAsmPrinter.cpp | 2
-rw-r--r--  lib/Target/Target.cpp | 2
-rw-r--r--  lib/Target/TargetData.cpp | 6
-rw-r--r--  lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp | 4
-rw-r--r--  lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp | 2
-rw-r--r--  lib/Target/X86/X86FastISel.cpp | 4
-rw-r--r--  lib/Target/XCore/XCoreAsmPrinter.cpp | 2
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.cpp | 2
-rw-r--r--  lib/Target/XCore/XCoreTargetAsmInfo.cpp | 2
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp | 8
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp | 30
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp | 8
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp | 40
-rw-r--r--  lib/Transforms/Utils/AddrModeMatcher.cpp | 2
-rw-r--r--  lib/Transforms/Utils/LowerAllocations.cpp | 2
49 files changed, 141 insertions, 141 deletions
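
For reference, a minimal sketch (not part of this patch) of what the renamed query computes, assuming the TargetData API of this LLVM era: the alloc size is the store size rounded up to the ABI alignment, i.e. the stride between consecutive elements in an array of the type, which is what the callers below rely on. The helper name is hypothetical.

    // Sketch only: illustrates the quantity that getTypeAllocSize returns,
    // assuming the LLVM 2.x TargetData interface used throughout this diff.
    #include "llvm/Target/TargetData.h"
    using namespace llvm;

    static uint64_t exampleAllocSize(const TargetData &TD, const Type *Ty) {
      uint64_t StoreSize = TD.getTypeStoreSize(Ty);     // bytes needed to hold one value of Ty
      unsigned ABIAlign  = TD.getABITypeAlignment(Ty);  // ABI-mandated alignment for Ty
      // Store size rounded up to the ABI alignment: the offset between
      // successive elements in an array of Ty.
      return (StoreSize + ABIAlign - 1) / ABIAlign * ABIAlign;
    }
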
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index fe71f04098..d958746359 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -123,7 +123,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,
}
if (AccessTy->isSized())
- return TD.getTypePaddedSize(AccessTy) < Size;
+ return TD.getTypeAllocSize(AccessTy) < Size;
return false;
}
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 7c99325b2d..00b54413aa 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -77,7 +77,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
} else {
const SequentialType *SQT = cast<SequentialType>(*GTI);
- Offset += TD.getTypePaddedSize(SQT->getElementType())*CI->getSExtValue();
+ Offset += TD.getTypeAllocSize(SQT->getElementType())*CI->getSExtValue();
}
}
return true;
@@ -405,8 +405,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
if (const ArrayType *AT =
dyn_cast<ArrayType>(GVTy->getElementType())) {
const Type *ElTy = AT->getElementType();
- uint64_t PaddedSize = TD->getTypePaddedSize(ElTy);
- APInt PSA(L->getValue().getBitWidth(), PaddedSize);
+ uint64_t AllocSize = TD->getTypeAllocSize(ElTy);
+ APInt PSA(L->getValue().getBitWidth(), AllocSize);
if (ElTy == cast<PointerType>(DestTy)->getElementType() &&
L->getValue().urem(PSA) == 0) {
APInt ElemIdx = L->getValue().udiv(PSA);
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 3f2656240e..e65cdd218a 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -1961,7 +1961,7 @@ SCEVHandle ScalarEvolution::createNodeForGEP(User *GEP) {
IntPtrTy);
LocalOffset =
getMulExpr(LocalOffset,
- getIntegerSCEV(TD->getTypePaddedSize(*GTI),
+ getIntegerSCEV(TD->getTypeAllocSize(*GTI),
IntPtrTy));
TotalOffset = getAddExpr(TotalOffset, LocalOffset);
}
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 20fa69ea24..c4f6faf612 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -459,7 +459,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
const Type *IndexedTy = GTI.getIndexedType();
if (!IndexedTy->isSized()) return;
unsigned GEPOpiBits = Index->getType()->getPrimitiveSizeInBits();
- uint64_t TypeSize = TD ? TD->getTypePaddedSize(IndexedTy) : 1;
+ uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
LocalMask = APInt::getAllOnesValue(GEPOpiBits);
LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
ComputeMaskedBits(Index, LocalMask,
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 0d9d77762b..45462da0d2 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -313,7 +313,7 @@ void AsmPrinter::EmitConstantPool(MachineConstantPool *MCP) {
EmitZeros(NewOffset - Offset);
const Type *Ty = CPE.getType();
- Offset = NewOffset + TM.getTargetData()->getTypePaddedSize(Ty);
+ Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << '_'
<< CPI << ":\t\t\t\t\t";
@@ -889,12 +889,12 @@ void AsmPrinter::EmitConstantValueOnly(const Constant *CV) {
// We can emit the pointer value into this slot if the slot is an
// integer slot greater or equal to the size of the pointer.
- if (TD->getTypePaddedSize(Ty) >= TD->getTypePaddedSize(Op->getType()))
+ if (TD->getTypeAllocSize(Ty) >= TD->getTypeAllocSize(Op->getType()))
return EmitConstantValueOnly(Op);
O << "((";
EmitConstantValueOnly(Op);
- APInt ptrMask = APInt::getAllOnesValue(TD->getTypePaddedSizeInBits(Ty));
+ APInt ptrMask = APInt::getAllOnesValue(TD->getTypeAllocSizeInBits(Ty));
SmallString<40> S;
ptrMask.toStringUnsigned(S);
@@ -992,14 +992,14 @@ void AsmPrinter::EmitGlobalConstantStruct(const ConstantStruct *CVS,
unsigned AddrSpace) {
// Print the fields in successive locations. Pad to align if needed!
const TargetData *TD = TM.getTargetData();
- unsigned Size = TD->getTypePaddedSize(CVS->getType());
+ unsigned Size = TD->getTypeAllocSize(CVS->getType());
const StructLayout *cvsLayout = TD->getStructLayout(CVS->getType());
uint64_t sizeSoFar = 0;
for (unsigned i = 0, e = CVS->getNumOperands(); i != e; ++i) {
const Constant* field = CVS->getOperand(i);
// Check if padding is needed and insert one or more 0s.
- uint64_t fieldSize = TD->getTypePaddedSize(field->getType());
+ uint64_t fieldSize = TD->getTypeAllocSize(field->getType());
uint64_t padSize = ((i == e-1 ? Size : cvsLayout->getElementOffset(i+1))
- cvsLayout->getElementOffset(i)) - fieldSize;
sizeSoFar += fieldSize + padSize;
@@ -1123,7 +1123,7 @@ void AsmPrinter::EmitGlobalConstantFP(const ConstantFP *CFP,
<< " long double most significant halfword";
O << '\n';
}
- EmitZeros(TD->getTypePaddedSize(Type::X86_FP80Ty) -
+ EmitZeros(TD->getTypeAllocSize(Type::X86_FP80Ty) -
TD->getTypeStoreSize(Type::X86_FP80Ty), AddrSpace);
return;
} else if (CFP->getType() == Type::PPC_FP128Ty) {
@@ -1228,7 +1228,7 @@ void AsmPrinter::EmitGlobalConstantLargeInt(const ConstantInt *CI,
void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) {
const TargetData *TD = TM.getTargetData();
const Type *type = CV->getType();
- unsigned Size = TD->getTypePaddedSize(type);
+ unsigned Size = TD->getTypeAllocSize(type);
if (CV->isNullValue() || isa<UndefValue>(CV)) {
EmitZeros(Size, AddrSpace);
diff --git a/lib/CodeGen/ELFWriter.cpp b/lib/CodeGen/ELFWriter.cpp
index ddc4e3588b..7cc1162352 100644
--- a/lib/CodeGen/ELFWriter.cpp
+++ b/lib/CodeGen/ELFWriter.cpp
@@ -284,7 +284,7 @@ void ELFWriter::EmitGlobal(GlobalVariable *GV) {
unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);
unsigned Size =
- TM.getTargetData()->getTypePaddedSize(GV->getType()->getElementType());
+ TM.getTargetData()->getTypeAllocSize(GV->getType()->getElementType());
// If this global has a zero initializer, it is part of the .bss or common
// section.
diff --git a/lib/CodeGen/MachOWriter.cpp b/lib/CodeGen/MachOWriter.cpp
index d2e87917af..c8787987a3 100644
--- a/lib/CodeGen/MachOWriter.cpp
+++ b/lib/CodeGen/MachOWriter.cpp
@@ -281,7 +281,7 @@ void MachOCodeEmitter::emitConstantPool(MachineConstantPool *MCP) {
// "giant object for PIC" optimization.
for (unsigned i = 0, e = CP.size(); i != e; ++i) {
const Type *Ty = CP[i].getType();
- unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+ unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
MachOWriter::MachOSection *Sec = MOW.getConstSection(CP[i].Val.ConstVal);
OutputBuffer SecDataOut(Sec->SectionData, is64Bit, isLittleEndian);
@@ -355,7 +355,7 @@ MachOWriter::~MachOWriter() {
void MachOWriter::AddSymbolToSection(MachOSection *Sec, GlobalVariable *GV) {
const Type *Ty = GV->getType()->getElementType();
- unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+ unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
unsigned Align = TM.getTargetData()->getPreferredAlignment(GV);
// Reserve space in the .bss section for this symbol while maintaining the
@@ -400,7 +400,7 @@ void MachOWriter::AddSymbolToSection(MachOSection *Sec, GlobalVariable *GV) {
void MachOWriter::EmitGlobal(GlobalVariable *GV) {
const Type *Ty = GV->getType()->getElementType();
- unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+ unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
bool NoInit = !GV->hasInitializer();
// If this global has a zero initializer, it is part of the .bss or common
@@ -825,7 +825,7 @@ void MachOWriter::InitMem(const Constant *C, void *Addr, intptr_t Offset,
continue;
} else if (const ConstantVector *CP = dyn_cast<ConstantVector>(PC)) {
unsigned ElementSize =
- TD->getTypePaddedSize(CP->getType()->getElementType());
+ TD->getTypeAllocSize(CP->getType()->getElementType());
for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
WorkList.push_back(CPair(CP->getOperand(i), PA+i*ElementSize));
} else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(PC)) {
@@ -926,10 +926,10 @@ void MachOWriter::InitMem(const Constant *C, void *Addr, intptr_t Offset,
abort();
}
} else if (isa<ConstantAggregateZero>(PC)) {
- memset((void*)PA, 0, (size_t)TD->getTypePaddedSize(PC->getType()));
+ memset((void*)PA, 0, (size_t)TD->getTypeAllocSize(PC->getType()));
} else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(PC)) {
unsigned ElementSize =
- TD->getTypePaddedSize(CPA->getType()->getElementType());
+ TD->getTypeAllocSize(CPA->getType()->getElementType());
for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
WorkList.push_back(CPair(CPA->getOperand(i), PA+i*ElementSize));
} else if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(PC)) {
diff --git a/lib/CodeGen/MachOWriter.h b/lib/CodeGen/MachOWriter.h
index aacaf2c9e0..20a4084638 100644
--- a/lib/CodeGen/MachOWriter.h
+++ b/lib/CodeGen/MachOWriter.h
@@ -468,7 +468,7 @@ namespace llvm {
const Type *Ty = C->getType();
if (Ty->isPrimitiveType() || Ty->isInteger()) {
- unsigned Size = TM.getTargetData()->getTypePaddedSize(Ty);
+ unsigned Size = TM.getTargetData()->getTypeAllocSize(Ty);
switch(Size) {
default: break; // Fall through to __TEXT,__const
case 4:
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 10b7576a69..8a1dc5d937 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5694,7 +5694,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
// Get the offsets to the 0 and 1 element of the array so that we can
// select between them.
SDValue Zero = DAG.getIntPtrConstant(0);
- unsigned EltSize = (unsigned)TD.getTypePaddedSize(Elts[0]->getType());
+ unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
SDValue One = DAG.getIntPtrConstant(EltSize);
SDValue Cond = DAG.getSetCC(DL,
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 22051867a0..367cf4cd5c 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -285,7 +285,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
uint64_t Offs =
- TD.getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+ TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
if (N == 0)
// Unhandled operand. Halt "fast" selection and bail.
@@ -294,7 +294,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
}
// N = N + Idx * ElementSize;
- uint64_t ElementSize = TD.getTypePaddedSize(Ty);
+ uint64_t ElementSize = TD.getTypeAllocSize(Ty);
unsigned IdxN = getRegForGEPIndex(Idx);
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 0787f933be..1a3370f0a0 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3638,7 +3638,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
DAG.getConstant(TLI.getTargetData()->
- getTypePaddedSize(VT.getTypeForMVT()),
+ getTypeAllocSize(VT.getTypeForMVT()),
TLI.getPointerTy()));
// Store the incremented VAList to the legalized pointer
Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Tmp2, V, 0);
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
index 6e38590e24..f1da2583f3 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodesEmit.cpp
@@ -295,7 +295,7 @@ void ScheduleDAGSDNodes::AddOperand(MachineInstr *MI, SDValue Op,
Align = TM.getTargetData()->getPrefTypeAlignment(Type);
if (Align == 0) {
// Alignment of vector types. FIXME!
- Align = TM.getTargetData()->getTypePaddedSize(Type);
+ Align = TM.getTargetData()->getTypeAllocSize(Type);
}
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
index 1562c13a3f..b340d0c971 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuild.cpp
@@ -128,7 +128,7 @@ static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
// Given an array type, recursively traverse the elements.
if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
const Type *EltTy = ATy->getElementType();
- uint64_t EltSize = TLI.getTargetData()->getTypePaddedSize(EltTy);
+ uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
StartingOffset + i * EltSize);
@@ -294,7 +294,7 @@ void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
const Type *Ty = AI->getAllocatedType();
- uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
AI->getAlignment());
@@ -2700,7 +2700,7 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
uint64_t Offs =
- TD->getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+ TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
unsigned PtrBits = TLI.getPointerTy().getSizeInBits();
if (PtrBits < 64) {
@@ -2715,7 +2715,7 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) {
}
// N = N + Idx * ElementSize;
- uint64_t ElementSize = TD->getTypePaddedSize(Ty);
+ uint64_t ElementSize = TD->getTypeAllocSize(Ty);
SDValue IdxN = getValue(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend
@@ -2756,7 +2756,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
return; // getValue will auto-populate this.
const Type *Ty = I.getAllocatedType();
- uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
I.getAlignment());
@@ -5199,7 +5199,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
// Otherwise, create a stack slot and emit a store to it before the
// asm.
const Type *Ty = OpVal->getType();
- uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
@@ -5500,7 +5500,7 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) {
// i32-ness of the optimizer: we do not want to promote to i64 and then
// multiply on 64-bit targets.
// FIXME: Malloc inst should go away: PR715.
- uint64_t ElementSize = TD->getTypePaddedSize(I.getType()->getElementType());
+ uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
if (ElementSize != 1)
Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
Src, DAG.getConstant(ElementSize, Src.getValueType()));
@@ -5614,7 +5614,7 @@ void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
const PointerType *Ty = cast<PointerType>(I->getType());
const Type *ElementTy = Ty->getElementType();
unsigned FrameAlign = getByValTypeAlignment(ElementTy);
- unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
+ unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
if (F.getParamAlignment(j))
@@ -5747,7 +5747,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
const PointerType *Ty = cast<PointerType>(Args[i].Ty);
const Type *ElementTy = Ty->getElementType();
unsigned FrameAlign = getByValTypeAlignment(ElementTy);
- unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
+ unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
// For ByVal, alignment should come from FE. BE will guess if this
// info is not there but there are cases it cannot get right.
if (Args[i].Alignment)
diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp
index c333acd42a..c179f1e3df 100644
--- a/lib/CodeGen/StackProtector.cpp
+++ b/lib/CodeGen/StackProtector.cpp
@@ -114,7 +114,7 @@ bool StackProtector::RequiresStackProtector() const {
if (const ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType()))
// If an array has more than SSPBufferSize bytes of allocated space,
// then we emit stack protectors.
- if (SSPBufferSize <= TD->getTypePaddedSize(AT))
+ if (SSPBufferSize <= TD->getTypeAllocSize(AT))
return true;
}
}
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
index 9cd03dac27..29a05bbbdb 100644
--- a/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -55,7 +55,7 @@ ExecutionEngine::~ExecutionEngine() {
char* ExecutionEngine::getMemoryForGV(const GlobalVariable* GV) {
const Type *ElTy = GV->getType()->getElementType();
- size_t GVSize = (size_t)getTargetData()->getTypePaddedSize(ElTy);
+ size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
return new char[GVSize];
}
@@ -848,16 +848,16 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
return;
} else if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
unsigned ElementSize =
- getTargetData()->getTypePaddedSize(CP->getType()->getElementType());
+ getTargetData()->getTypeAllocSize(CP->getType()->getElementType());
for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
return;
} else if (isa<ConstantAggregateZero>(Init)) {
- memset(Addr, 0, (size_t)getTargetData()->getTypePaddedSize(Init->getType()));
+ memset(Addr, 0, (size_t)getTargetData()->getTypeAllocSize(Init->getType()));
return;
} else if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
unsigned ElementSize =
- getTargetData()->getTypePaddedSize(CPA->getType()->getElementType());
+ getTargetData()->getTypeAllocSize(CPA->getType()->getElementType());
for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
return;
@@ -1004,7 +1004,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
InitializeMemory(GV->getInitializer(), GA);
const Type *ElTy = GV->getType()->getElementType();
- size_t GVSize = (size_t)getTargetData()->getTypePaddedSize(ElTy);
+ size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy);
NumInitBytes += (unsigned)GVSize;
++NumGlobals;
}
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index a79bcc2803..765fed248f 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -750,7 +750,7 @@ void Interpreter::visitAllocationInst(AllocationInst &I) {
unsigned NumElements =
getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();