-rw-r--r--  include/llvm/Target/TargetData.h               | 58
-rw-r--r--  lib/Analysis/AliasAnalysis.cpp                 |  6
-rw-r--r--  lib/Analysis/AliasAnalysisEvaluator.cpp        |  6
-rw-r--r--  lib/Analysis/AliasSetTracker.cpp               |  9
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp            |  4
-rw-r--r--  lib/Analysis/ConstantFolding.cpp               |  2
-rw-r--r--  lib/Analysis/LoadValueNumbering.cpp            |  2
-rw-r--r--  lib/Analysis/MemoryDependenceAnalysis.cpp      | 22
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp       |  4
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAG.cpp       |  2
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp  | 12
-rw-r--r--  lib/ExecutionEngine/ExecutionEngine.cpp        | 10
-rw-r--r--  lib/ExecutionEngine/Interpreter/Execution.cpp  |  4
-rw-r--r--  lib/ExecutionEngine/JIT/JIT.cpp                |  2
-rw-r--r--  lib/ExecutionEngine/JIT/JITEmitter.cpp         |  2
-rw-r--r--  lib/Target/TargetData.cpp                      | 83
-rw-r--r--  lib/Transforms/IPO/ArgumentPromotion.cpp       |  2
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp               |  2
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp       |  2
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp | 10
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp | 28
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                 |  2
-rw-r--r--  lib/Transforms/Scalar/PredicateSimplifier.cpp  |  7
-rw-r--r--  lib/Transforms/Utils/LowerAllocations.cpp      |  2
24 files changed, 135 insertions, 148 deletions
diff --git a/include/llvm/Target/TargetData.h b/include/llvm/Target/TargetData.h
index 026749f3e0..94ef0a2439 100644
--- a/include/llvm/Target/TargetData.h
+++ b/include/llvm/Target/TargetData.h
@@ -155,26 +155,48 @@ public:
/// Target pointer size, in bits
unsigned char getPointerSizeInBits() const { return 8*PointerMemSize; }
- /// getTypeSize - Return the number of bytes necessary to hold the specified
- /// type.
- uint64_t getTypeSize(const Type *Ty) const;
-
- /// getABITypeSize - Return the number of bytes allocated for the specified
- /// type when used as an element in a larger object, including alignment
- /// padding.
- uint64_t getABITypeSize(const Type *Ty) const {
+ /// getTypeSizeInBits - Return the number of bits necessary to hold the
+ /// specified type. For example, returns 36 for i36 and 80 for x86_fp80.
+ uint64_t getTypeSizeInBits(const Type* Ty) const;
+
+ /// getTypeStoreSize - Return the maximum number of bytes that may be
+ /// overwritten by storing the specified type. For example, returns 5
+ /// for i36 and 10 for x86_fp80.
+ uint64_t getTypeStoreSize(const Type *Ty) const {
+ return (getTypeSizeInBits(Ty)+7)/8;
+ }
+
+ /// getTypeStoreSizeInBits - Return the maximum number of bits that may be
+ /// overwritten by storing the specified type; always a multiple of 8. For
+ /// example, returns 40 for i36 and 80 for x86_fp80.
+ uint64_t getTypeStoreSizeInBits(const Type *Ty) const {
+ return 8*getTypeStoreSize(Ty);
+ }
+
+ /// getABITypeSize - Return the offset in bytes between successive objects
+ /// of the specified type, including alignment padding. This is the amount
+ /// that alloca reserves for this type. For example, returns 12 or 16 for
+ /// x86_fp80, depending on alignment.
+ uint64_t getABITypeSize(const Type* Ty) const {
unsigned char Align = getABITypeAlignment(Ty);
- return (getTypeSize(Ty) + Align - 1)/Align*Align;
+ return (getTypeStoreSize(Ty) + Align - 1)/Align*Align;
}
- /// getTypeSizeInBits - Return the number of bits necessary to hold the
- /// specified type.
- uint64_t getTypeSizeInBits(const Type* Ty) const;
+ /// getABITypeSizeInBits - Return the offset in bits between successive
+ /// objects of the specified type, including alignment padding; always a
+ /// multiple of 8. This is the amount that alloca reserves for this type.
+ /// For example, returns 96 or 128 for x86_fp80, depending on alignment.
+ uint64_t getABITypeSizeInBits(const Type* Ty) const {
+ return 8*getABITypeSize(Ty);
+ }
- /// getABITypeSizeInBits - Return the number of bytes allocated for the
- /// specified type when used as an element in a larger object, including
- /// alignment padding.
- uint64_t getABITypeSizeInBits(const Type* Ty) const;
+ /// getTypeSize - Obsolete method, do not use. Replaced by getTypeStoreSize
+ /// and getABITypeSize. For alias analysis of loads and stores you probably
+ /// want getTypeStoreSize. Use getABITypeSize for GEP computations and alloca
+ /// sizing.
+ uint64_t getTypeSize(const Type *Ty) const {
+ return getTypeStoreSize(Ty);
+ }
/// getABITypeAlignment - Return the minimum ABI-required alignment for the
/// specified type.
@@ -238,6 +260,10 @@ public:
return StructSize;
}
+ uint64_t getSizeInBits() const {
+ return 8*StructSize;
+ }
+
unsigned getAlignment() const {
return StructAlignment;
}
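
For context, how the new size queries relate to one another can be summarized with a small sketch. This is not part of the patch: it assumes the string-based TargetData constructor and IntegerType::get(unsigned) available in this era of the tree, and the layout string is purely hypothetical.

// Sketch only -- illustrates the new size queries added by this patch.
#include "llvm/Target/TargetData.h"
#include "llvm/DerivedTypes.h"
#include <cassert>
using namespace llvm;

static void checkSizeRelations() {
  TargetData TD("e-p:32:32:32");                   // hypothetical layout string
  const Type *I36 = IntegerType::get(36);
  assert(TD.getTypeSizeInBits(I36) == 36);         // exact bit width
  assert(TD.getTypeStoreSize(I36) == 5);           // (36 + 7) / 8 bytes touched
  assert(TD.getTypeStoreSizeInBits(I36) == 40);    // store size, in bits
  // The ABI size rounds the store size up to the ABI alignment, so its exact
  // value is target dependent, but these relations always hold:
  assert(TD.getABITypeSize(I36) >= TD.getTypeStoreSize(I36));
  assert(TD.getABITypeSize(I36) % TD.getABITypeAlignment(I36) == 0);
  // x86_fp80: 80 bits of data stored in 10 bytes; alloca reserves 12 or 16
  // bytes for it, depending on the target's alignment for the type.
  assert(TD.getTypeStoreSize(Type::X86_FP80Ty) == 10);
}
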
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index 5ae2342404..2a3ac5ae17 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -95,7 +95,7 @@ AliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(LoadInst *L, Value *P, unsigned Size) {
- return alias(L->getOperand(0), TD->getTypeSize(L->getType()),
+ return alias(L->getOperand(0), TD->getTypeStoreSize(L->getType()),
P, Size) ? Ref : NoModRef;
}
@@ -103,8 +103,8 @@ AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(StoreInst *S, Value *P, unsigned Size) {
// If the stored address cannot alias the pointer in question, then the
// pointer cannot be modified by the store.
- if (!alias(S->getOperand(1), TD->getTypeSize(S->getOperand(0)->getType()),
- P, Size))
+ if (!alias(S->getOperand(1),
+ TD->getTypeStoreSize(S->getOperand(0)->getType()), P, Size))
return NoModRef;
// If the pointer is a pointer to constant memory, then it could not have been
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp
index 30965c2fb3..e0457b1778 100644
--- a/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -137,12 +137,12 @@ bool AAEval::runOnFunction(Function &F) {
I1 != E; ++I1) {
unsigned I1Size = 0;
const Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
- if (I1ElTy->isSized()) I1Size = TD.getTypeSize(I1ElTy);
+ if (I1ElTy->isSized()) I1Size = TD.getTypeStoreSize(I1ElTy);
for (std::set<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
unsigned I2Size = 0;
const Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
- if (I2ElTy->isSized()) I2Size = TD.getTypeSize(I2ElTy);
+ if (I2ElTy->isSized()) I2Size = TD.getTypeStoreSize(I2ElTy);
switch (AA.alias(*I1, I1Size, *I2, I2Size)) {
case AliasAnalysis::NoAlias:
@@ -169,7 +169,7 @@ bool AAEval::runOnFunction(Function &F) {
V != Ve; ++V) {
unsigned Size = 0;
const Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
- if (ElTy->isSized()) Size = TD.getTypeSize(ElTy);
+ if (ElTy->isSized()) Size = TD.getTypeStoreSize(ElTy);
switch (AA.getModRefInfo(*C, *V, Size)) {
case AliasAnalysis::NoModRef:
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp
index 366909c0eb..fcdd1b3399 100644
--- a/lib/Analysis/AliasSetTracker.cpp
+++ b/lib/Analysis/AliasSetTracker.cpp
@@ -269,7 +269,7 @@ bool AliasSetTracker::add(Value *Ptr, unsigned Size) {
bool AliasSetTracker::add(LoadInst *LI) {
bool NewPtr;
AliasSet &AS = addPointer(LI->getOperand(0),
- AA.getTargetData().getTypeSize(LI->getType()),
+ AA.getTargetData().getTypeStoreSize(LI->getType()),
AliasSet::Refs, NewPtr);
if (LI->isVolatile()) AS.setVolatile();
return NewPtr;
@@ -279,7 +279,7 @@ bool AliasSetTracker::add(StoreInst *SI) {
bool NewPtr;
Value *Val = SI->getOperand(0);
AliasSet &AS = addPointer(SI->getOperand(1),
- AA.getTargetData().getTypeSize(Val->getType()),
+ AA.getTargetData().getTypeStoreSize(Val->getType()),
AliasSet::Mods, NewPtr);
if (SI->isVolatile()) AS.setVolatile();
return NewPtr;
@@ -395,7 +395,7 @@ bool AliasSetTracker::remove(Value *Ptr, unsigned Size) {
}
bool AliasSetTracker::remove(LoadInst *LI) {
- unsigned Size = AA.getTargetData().getTypeSize(LI->getType());
+ unsigned Size = AA.getTargetData().getTypeStoreSize(LI->getType());
AliasSet *AS = findAliasSetForPointer(LI->getOperand(0), Size);
if (!AS) return false;
remove(*AS);
@@ -403,7 +403,8 @@ bool AliasSetTracker::remove(LoadInst *LI) {
}
bool AliasSetTracker::remove(StoreInst *SI) {
- unsigned Size = AA.getTargetData().getTypeSize(SI->getOperand(0)->getType());
+ unsigned Size =
+ AA.getTargetData().getTypeStoreSize(SI->getOperand(0)->getType());
AliasSet *AS = findAliasSetForPointer(SI->getOperand(1), Size);
if (!AS) return false;
remove(*AS);
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 89b5d5c3cb..6aeaec23fa 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -364,7 +364,7 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
// global/alloca/malloc, it cannot be accessing the global (it's
// undefined to load or store bytes before or after an object).
const Type *ElTy = cast<PointerType>(O1->getType())->getElementType();
- unsigned GlobalSize = getTargetData().getTypeSize(ElTy);
+ unsigned GlobalSize = getTargetData().getABITypeSize(ElTy);
if (GlobalSize < V2Size && V2Size != ~0U)
return NoAlias;
}
@@ -382,7 +382,7 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
// global/alloca/malloc, it cannot be accessing the object (it's
// undefined to load or store bytes before or after an object).
const Type *ElTy = cast<PointerType>(O2->getType())->getElementType();
- unsigned GlobalSize = getTargetData().getTypeSize(ElTy);
+ unsigned GlobalSize = getTargetData().getABITypeSize(ElTy);
if (GlobalSize < V1Size && V1Size != ~0U)
return NoAlias;
}
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 599c7697a3..886dd9f4f7 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -74,7 +74,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
} else {
const SequentialType *SQT = cast<SequentialType>(*GTI);
- Offset += TD.getTypeSize(SQT->getElementType())*CI->getSExtValue();
+ Offset += TD.getABITypeSize(SQT->getElementType())*CI->getSExtValue();
}
}
return true;
diff --git a/lib/Analysis/LoadValueNumbering.cpp b/lib/Analysis/LoadValueNumbering.cpp
index f1ade951f3..3af92bc11c 100644
--- a/lib/Analysis/LoadValueNumbering.cpp
+++ b/lib/Analysis/LoadValueNumbering.cpp
@@ -293,7 +293,7 @@ void LoadVN::getEqualNumberNodes(Value *V,
Function *F = LoadBB->getParent();
// Find out how many bytes of memory are loaded by the load instruction...
- unsigned LoadSize = getAnalysis<TargetData>().getTypeSize(LI->getType());
+ unsigned LoadSize = getAnalysis<TargetData>().getTypeStoreSize(LI->getType());
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
// Figure out if the load is invalidated from the entry of the block it is in
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 538a394d46..5375d52c33 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -78,20 +78,20 @@ Instruction* MemoryDependenceAnalysis::getCallSiteDependency(CallSite C,
uint64_t pointerSize = 0;
if (StoreInst* S = dyn_cast<StoreInst>(QI)) {
pointer = S->getPointerOperand();
- pointerSize = TD.getTypeSize(S->getOperand(0)->getType());
+ pointerSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
} else if (LoadInst* L = dyn_cast<LoadInst>(QI)) {
pointer = L->getPointerOperand();
- pointerSize = TD.getTypeSize(L->getType());
+ pointerSize = TD.getTypeStoreSize(L->getType());
} else if (AllocationInst* AI = dyn_cast<AllocationInst>(QI)) {
pointer = AI;
if (ConstantInt* C = dyn_cast<ConstantInt>(AI->getArraySize()))
pointerSize = C->getZExtValue() * \
- TD.getTypeSize(AI->getAllocatedType());
+ TD.getABITypeSize(AI->getAllocatedType());
else
pointerSize = ~0UL;
} else if (VAArgInst* V = dyn_cast<VAArgInst>(QI)) {
pointer = V->getOperand(0);
- pointerSize = TD.getTypeSize(V->getType());
+ pointerSize = TD.getTypeStoreSize(V->getType());
} else if (FreeInst* F = dyn_cast<FreeInst>(QI)) {
pointer = F->getPointerOperand();
@@ -287,15 +287,15 @@ Instruction* MemoryDependenceAnalysis::getDependency(Instruction* query,
bool queryIsVolatile = false;
if (StoreInst* S = dyn_cast<StoreInst>(query)) {
dependee = S->getPointerOperand();
- dependeeSize = TD.getTypeSize(S->getOperand(0)->getType());
+ dependeeSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
queryIsVolatile = S->isVolatile();
} else if (LoadInst* L = dyn_cast<LoadInst>(query)) {
dependee = L->getPointerOperand();
- dependeeSize = TD.getTypeSize(L->getType());
+ dependeeSize = TD.getTypeStoreSize(L->getType());
queryIsVolatile = L->isVolatile();
} else if (VAArgInst* V = dyn_cast<VAArgInst>(query)) {
dependee = V->getOperand(0);
- dependeeSize = TD.getTypeSize(V->getType());
+ dependeeSize = TD.getTypeStoreSize(V->getType());
} else if (FreeInst* F = dyn_cast<FreeInst>(query)) {
dependee = F->getPointerOperand();
@@ -330,7 +330,7 @@ Instruction* MemoryDependenceAnalysis::getDependency(Instruction* query,
}
pointer = S->getPointerOperand();
- pointerSize = TD.getTypeSize(S->getOperand(0)->getType());
+ pointerSize = TD.getTypeStoreSize(S->getOperand(0)->getType());
} else if (LoadInst* L = dyn_cast<LoadInst>(QI)) {
// All volatile loads/stores depend on each other
if (queryIsVolatile && L->isVolatile()) {
@@ -343,17 +343,17 @@ Instruction* MemoryDependenceAnalysis::getDependency(Instruction* query,
}
pointer = L->getPointerOperand();
- pointerSize = TD.getTypeSize(L->getType());
+ pointerSize = TD.getTypeStoreSize(L->getType());
} else if (AllocationInst* AI = dyn_cast<AllocationInst>(QI)) {
pointer = AI;
if (ConstantInt* C = dyn_cast<ConstantInt>(AI->getArraySize()))
pointerSize = C->getZExtValue() * \
- TD.getTypeSize(AI->getAllocatedType());
+ TD.getABITypeSize(AI->getAllocatedType());
else
pointerSize = ~0UL;
} else if (VAArgInst* V = dyn_cast<VAArgInst>(QI)) {
pointer = V->getOperand(0);
- pointerSize = TD.getTypeSize(V->getType());
+ pointerSize = TD.getTypeStoreSize(V->getType());
} else if (FreeInst* F = dyn_cast<FreeInst>(QI)) {
pointer = F->getPointerOperand();
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 9150e96cc0..8b60d7c2ce 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -3524,7 +3524,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
MVT::ValueType slotVT =
(Node->getOpcode() == ISD::FP_EXTEND) ? oldVT : newVT;
const Type *Ty = MVT::getTypeForValueType(slotVT);
- uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI =
@@ -3618,7 +3618,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) {
// slots and always reusing the same one. We currently always create
// new ones, as reuse may inhibit scheduling.
const Type *Ty = MVT::getTypeForValueType(ExtraVT);
- uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI =
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
index bb5379c349..c98c1312fd 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp
@@ -482,7 +482,7 @@ void ScheduleDAG::AddOperand(MachineInstr *MI, SDOperand Op,
Align = TM.getTargetData()->getPreferredTypeAlignmentShift(Type);
if (Align == 0) {
// Alignment of vector types. FIXME!
- Align = TM.getTargetData()->getTypeSize(Type);
+ Align = TM.getTargetData()->getABITypeSize(Type);
Align = Log2_64(Align);
}
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index b103e28b54..a5b161fbef 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -261,7 +261,7 @@ FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
const Type *Ty = AI->getAllocatedType();
- uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
AI->getAlignment());
@@ -2335,7 +2335,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
return; // getValue will auto-populate this.
const Type *Ty = I.getAllocatedType();
- uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
I.getAlignment());
@@ -3546,7 +3546,7 @@ void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
// Otherwise, create a stack slot and emit a store to it before the
// asm.
const Type *Ty = OpVal->getType();
- uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
+ uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
@@ -3804,7 +3804,7 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) {
Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
// Scale the source by the type size.
- uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
+ uint64_t ElementSize = TD->getABITypeSize(I.getType()->getElementType());
Src = DAG.getNode(ISD::MUL, Src.getValueType(),
Src, getIntPtrConstant(ElementSize));
@@ -3917,7 +3917,7 @@ TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
const StructType *STy = cast<StructType>(Ty->getElementType());
unsigned StructAlign =
Log2_32(getTargetData()->getCallFrameTypeAlignment(STy));
- unsigned StructSize = getTargetData()->getTypeSize(STy);
+ unsigned StructSize = getTargetData()->getABITypeSize(STy);
Flags |= (StructAlign << ISD::ParamFlags::ByValAlignOffs);
Flags |= (StructSize << ISD::ParamFlags::ByValSizeOffs);
}
@@ -4047,7 +4047,7 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
const StructType *STy = cast<StructType>(Ty->getElementType());
unsigned StructAlign =
Log2_32(getTargetData()->getCallFrameTypeAlignment(STy));
- unsigned StructSize = getTargetData()->getTypeSize(STy);
+ unsigned StructSize = getTargetData()->getABITypeSize(STy);
Flags |= (StructAlign << ISD::ParamFlags::ByValAlignOffs);
Flags |= (StructSize << ISD::ParamFlags::ByValSizeOffs);
}
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
index d89a9bb4ac..72db4e4360 100644
--- a/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -735,7 +735,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
return;
} else if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
unsigned ElementSize =
- getTargetData()->getTypeSize(CP->getType()->getElementType());
+ getTargetData()->getABITypeSize(CP->getType()->getElementType());
for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
return;
@@ -744,7 +744,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
return;
} else if (isa<ConstantAggregateZero>(Init)) {
- memset(Addr, 0, (size_t)getTargetData()->getTypeSize(Init->getType()));
+ memset(Addr, 0, (size_t)getTargetData()->getABITypeSize(Init->getType()));
return;
}
@@ -752,7 +752,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
case Type::ArrayTyID: {
const ConstantArray *CPA = cast<ConstantArray>(Init);
unsigned ElementSize =
- getTargetData()->getTypeSize(CPA->getType()->getElementType());
+ getTargetData()->getABITypeSize(CPA->getType()->getElementType());
for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
return;
@@ -843,7 +843,7 @@ void ExecutionEngine::emitGlobals() {
const Type *Ty = I->getType()->getElementType();
// Allocate some memory for it!
- unsigned Size = TD->getTypeSize(Ty);
+ unsigned Size = TD->getABITypeSize(Ty);
addGlobalMapping(I, new char[Size]);
} else {
// External variable reference. Try to use the dynamic loader to
@@ -897,7 +897,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
DOUT << "Global '" << GV->getName() << "' -> " << GA << "\n";
const Type *ElTy = GV->getType()->getElementType();
- size_t GVSize = (size_t)getTargetData()->getTypeSize(ElTy);
+ size_t GVSize = (size_t)getTargetData()->getABITypeSize(ElTy);
if (GA == 0) {
// If it's not already specified, allocate memory for the global.
GA = new char[GVSize];
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index f11cf816b2..6ab123125f 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -746,7 +746,7 @@ void Interpreter::visitAllocationInst(AllocationInst &I) {
unsigned NumElements =
getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
- unsigned TypeSize = (size_t)TD.getTypeSize(Ty);
+ unsigned TypeSize = (size_t)TD.getABITypeSize(Ty);
// Avoid malloc-ing zero bytes, use max()...
unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
@@ -806,7 +806,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
Idx = (int64_t)IdxGV.IntVal.getZExtValue();
else
assert(0 && "Invalid index type for getelementptr");
- Total += TD.getTypeSize(ST->getElementType())*Idx;
+ Total += TD.getABITypeSize(ST->getElementType())*Idx;
}
}
diff --git a/lib/ExecutionEngine/JIT/JIT.cpp b/lib/ExecutionEngine/JIT/JIT.cpp
index 640520f313..5b04124cd0 100644
--- a/lib/ExecutionEngine/JIT/JIT.cpp
+++ b/lib/ExecutionEngine/JIT/JIT.cpp
@@ -337,7 +337,7 @@ void *JIT::getOrEmitGlobalVariable(const GlobalVariable *GV) {
// actually initialize the global after current function has finished
// compilation.
const Type *GlobalType = GV->getType()->getElementType();
- size_t S = getTargetData()->getTypeSize(GlobalType);
+ size_t S = getTargetData()->getABITypeSize(GlobalType);
size_t A = getTargetData()->getPrefTypeAlignment(GlobalType);
if (A <= 8) {
Ptr = malloc(S);
diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp
index b7af521dd9..eab322cce4 100644
--- a/lib/ExecutionEngine/JIT/JITEmitter.cpp
+++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp
@@ -899,7 +899,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
unsigned Size = CPE.Offset;
const Type *Ty = CPE.isMachineConstantPoolEntry()
? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
- Size += TheJIT->getTargetData()->getTypeSize(Ty);
+ Size += TheJIT->getTargetData()->getABITypeSize(Ty);
ConstantPoolBase = allocateSpace(Size, 1 << MCP->getConstantPoolAlignment());
ConstantPool = MCP;
diff --git a/lib/Target/TargetData.cpp b/lib/Target/TargetData.cpp
index b1b78a8ada..5a189205ed 100644
--- a/lib/Target/TargetData.cpp
+++ b/lib/Target/TargetData.cpp
@@ -49,14 +49,13 @@ StructLayout::StructLayout(const StructType *ST, const TargetData &TD) {
// Loop over each of the elements, placing them in memory...
for (unsigned i = 0, e = NumElements; i != e; ++i) {
const Type *Ty = ST->getElementType(i);
- unsigned TyAlign;
- uint64_t TySize;
- TyAlign = (ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty));
- TySize = TD.getTypeSize(Ty);
+ unsigned TyAlign = ST->isPacked() ?
+ 1 : TD.getABITypeAlignment(Ty);
+ uint64_t TySize = ST->isPacked() ?
+ TD.getTypeStoreSize(Ty) : TD.getABITypeSize(Ty);
- // Add padding if necessary to make the data element aligned properly...
- if (StructSize % TyAlign != 0)
- StructSize = (StructSize/TyAlign + 1) * TyAlign; // Add padding...
+ // Add padding if necessary to align the data element properly...
+ StructSize = (StructSize + TyAlign - 1)/TyAlign * TyAlign;
// Keep track of maximum alignment constraint
StructAlignment = std::max(TyAlign, StructAlignment);
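
A worked example of the rounding step above (a sketch, not part of the patch; the offsets assume a target where i32 has 4-byte ABI alignment):

// For an unpacked { i8, i32 }:
//   after the i8 field:        StructSize = 1
//   pad before the i32 field:  (1 + 4 - 1) / 4 * 4 = 4
//   after the i32 field:       StructSize = 4 + 4 = 8, StructAlignment = 4
// Packed structs take the other branch: alignment 1 and the store size, so no
// padding is ever inserted.
#include <stdint.h>
static uint64_t roundUpToAlignment(uint64_t Offset, unsigned Align) {
  return (Offset + Align - 1) / Align * Align;   // same formula as above
}
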
@@ -406,83 +405,47 @@ std::string TargetData::getStringRepresentation() const {
}
-uint64_t TargetData::getTypeSize(const Type *Ty) const {
+uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
case Type::LabelTyID:
case Type::PointerTyID:
- return getPointerSize();
+ return getPointerSizeInBits();
case Type::ArrayTyID: {
const ArrayType *ATy = cast<ArrayType>(Ty);
- uint64_t Size;
- unsigned char Alignment;
- Size = getTypeSize(ATy->getElementType());
- Alignment = getABITypeAlignment(ATy->getElementType());
- uint64_t AlignedSize = (Size + Alignment - 1)/Alignment*Alignment;
- return AlignedSize*ATy->getNumElements();
+ return getABITypeSizeInBits(ATy->getElementType())*ATy->getNumElements();
}
case Type::StructTyID: {
// Get the layout annotation... which is lazily created on demand.
const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
- return Layout->getSizeInBytes();
- }
- case Type::IntegerTyID: {
- unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
- if (BitWidth <= 8) {
- return 1;
- } else if (BitWidth <= 16) {
- return 2;
- } else if (BitWidth <= 32) {
- return 4;
- } else if (BitWidth <= 64) {
- return 8;
- } else {
- // The size of this > 64 bit type is chosen as a multiple of the
- // preferred alignment of the largest "native" size the target supports.
- // We first obtain the the alignment info for this type and then compute
- // the next largest multiple of that size.
- uint64_t size = getAlignmentInfo(INTEGER_ALIGN, BitWidth, false) * 8;
- return (((BitWidth / (size)) + (BitWidth % size != 0)) * size) / 8;
- }
- break;
+ return Layout->getSizeInBits();
}
+ case Type::IntegerTyID:
+ return cast<IntegerType>(Ty)->getBitWidth();
case Type::VoidTyID:
- return 1;
+ return 8;
case Type::FloatTyID:
- return 4;
+ return 32;
case Type::DoubleTyID:
- return 8;
+ return 64;
case Type::PPC_FP128TyID:
case Type::FP128TyID:
- return 16;
+ return 128;
// In memory objects this is always aligned to a higher boundary, but
- // only 10 bytes contain information.
+ // only 80 bits contain information.
case Type::X86_FP80TyID:
- return 10;
+ return 80;
case Type::VectorTyID: {
const VectorType *PTy = cast<VectorType>(Ty);
- return PTy->getBitWidth() / 8;
+ return PTy->getBitWidth();
}
default:
- assert(0 && "TargetData::getTypeSize(): Unsupported type");
+ assert(0 && "TargetData::getTypeSizeInBits(): Unsupported type");
break;
}
return 0;
}
-uint64_t TargetData::getTypeSizeInBits(const Type *Ty) const {
- if (Ty->isInteger())
- return cast<IntegerType>(Ty)->getBitWidth();
- else
- return getTypeSize(Ty) * 8;
-}
-
-uint64_t TargetData::getABITypeSizeInBits(const Type *Ty) const {
- if (Ty->isInteger())
- return cast<IntegerType>(Ty)->getBitWidth();
- else
- return getABITypeSize(Ty) * 8;
-}
/*!
\param abi_or_pref Flag that determines which alignment is returned. true
returns the ABI alignment, false returns the preferred alignment.
@@ -542,7 +505,7 @@ unsigned char TargetData::getAlignment(const Type *Ty, bool abi_or_pref) const {
break;
}
- return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSize(Ty) * 8,
+ return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty),
abi_or_pref);
}
@@ -603,7 +566,7 @@ uint64_t TargetData::getIndexedOffset(const Type *ptrTy, Value* const* Indices,
// Get the array index and the size of each array element.
int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue();
- Result += arrayIdx * (int64_t)getTypeSize(Ty);
+ Result += arrayIdx * (int64_t)getABITypeSize(Ty);
}
}
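
The same ABI-size stride applies to getIndexedOffset above: array and GEP offsets advance by the ABI size, even though a load of one element only reads the store size. A minimal sketch (not from the patch; it assumes a target where x86_fp80's ABI size is 16 rather than 12):

// Sketch: element 3 of an x86_fp80 array starts at 3 * 16 = 48 bytes, while a
// load of that element still reads only getTypeStoreSize(x86_fp80) = 10 bytes.
#include "llvm/Target/TargetData.h"
using namespace llvm;

static int64_t arrayElementOffset(const TargetData &TD, const Type *ElemTy,
                                  int64_t Idx) {
  return Idx * (int64_t)TD.getABITypeSize(ElemTy);  // mirrors getIndexedOffset
}
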
@@ -623,7 +586,7 @@ unsigned TargetData::getPreferredAlignmentLog(const GlobalVariable *GV) const {
if (Alignment < 4) {
// If the global is not external, see if it is large. If so, give it a
// larger alignment.
- if (getTypeSize(ElemType) > 128)
+ if (getTypeSizeInBits(ElemType) > 128)
Alignment = 4; // 16-byte alignment.
}
}
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index 85b29f871f..7479c8ee67 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -277,7 +277,7 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg) const {
const PointerType *LoadTy =
cast<PointerType>(Load->getOperand(0)->getType());
- unsigned LoadSize = (unsigned)TD.getTypeSize(LoadTy->getElementType());
+ unsigned LoadSize = (unsigned)TD.getTypeStoreSize(LoadTy->getElementType());
if (AA.canInstructionRangeModify(BB->front(), *Load, Arg, LoadSize))
return false; // Pointer is invalidated!
diff --git a/lib/Transforms/IPO/Gl