author | Chandler Carruth <chandlerc@gmail.com> | 2012-11-01 08:07:29 +0000 |
---|---|---|
committer | Chandler Carruth <chandlerc@gmail.com> | 2012-11-01 08:07:29 +0000 |
commit | ece6c6bb6329748b92403c06ac87f45c43485911 (patch) | |
tree | 6db9d3586fbfc4b362af63c5d5b5a5498fb569e0 | |
parent | 86ccc55c82651f91fd6a312c5f6a4b511bcd1aec (diff) |
Revert the series of commits starting with r166578 which introduced the
getIntPtrType support for multiple address spaces via a pointer type,
and also introduced a crasher bug in the constant folder reported in
PR14233.
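For context, the API at the center of this is DataLayout::getIntPtrType. The
reverted series selected the pointer-sized integer type from a pointer (or
vector-of-pointer) Type, while this revert restores the form that takes an
explicit address space number defaulting to 0, as the DataLayout.h hunk below
shows. A minimal caller-side sketch, assuming LLVM 3.2-era headers and a
hypothetical helper name (not taken from the patch itself):

```cpp
#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Value.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Pick a pointer-sized integer type for whatever address space Ptr lives in.
// (intPtrTypeFor is an illustrative name, not part of the patch.)
static IntegerType *intPtrTypeFor(const DataLayout &TD, Value *Ptr) {
  unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
  // Post-revert form: the address space is passed as a number (defaults to
  // 0); the reverted series would instead have accepted Ptr->getType().
  return TD.getIntPtrType(Ptr->getContext(), AS);
}
```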
These commits also contained several problems that should really be
addressed before they are re-committed. To reduce churn and minimize the
number of reverted commits, I have avoided reverting various cleanups to
the DataLayout APIs that are reasonable to keep moving forward. I've also
manually resolved merge conflicts and arranged for the getIntPtrType
function to stay in DataLayout, defined in a plausible way after this
revert.
Thanks to Duncan for working through this exact strategy with me, and
Nick Lewycky for tracking down the really annoying crasher this
triggered. (Test case to follow in its own commit.)
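One concrete effect of this arrangement shows up in EmitGEPOffset (see the
include/llvm/Transforms/Utils/Local.h hunk below): the offset arithmetic now
uses the default address space's integer type, while the high-bit mask is
still sized from the pointer width of the GEP's own address space. A
simplified sketch of that pattern, paraphrased from the hunk rather than
copied verbatim, with an illustrative helper name:

```cpp
#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Operator.h"
#include "llvm/User.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// The two DataLayout queries EmitGEPOffset makes after the revert.
static uint64_t gepHighBitsMask(const DataLayout &TD, User *GEP,
                                Type *&IntPtrTy) {
  // Offset type: pointer-sized integer in the default address space.
  IntPtrTy = TD.getIntPtrType(GEP->getContext());

  // The mask for the high-order bits still honors the GEP's address space.
  unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
  unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
  return ~0ULL >> (64 - IntPtrWidth);
}
```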
After discussing with Duncan extensively, and based on a note from
Micah, I'm going to continue backing out the more problematic patches in
this series in order to ensure we go into the
LLVM 3.2 branch with a reasonable story here. I'll send a note to
llvmdev explaining what's going on and why.
Summary of reverted revisions:
r166634: Fix a compiler warning with an unused variable.
r166607: Add some cleanup to the DataLayout changes requested by
Chandler.
r166596: Revert "Back out r166591, not sure why this made it through
since I cancelled the command. Bleh, sorry about this!"
r166591: Delete a directory that wasn't supposed to be checked in yet.
r166578: Add in support for getIntPtrType to get the pointer type based
on the address space.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@167221 91177308-0d34-0410-b5e6-96231b3b80d8
46 files changed, 463 insertions, 805 deletions
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index 9e5d97dd7f..a842898e41 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -168,8 +168,7 @@ class ObjectSizeOffsetVisitor
 public:
   ObjectSizeOffsetVisitor(const DataLayout *TD, const TargetLibraryInfo *TLI,
-                          LLVMContext &Context, bool RoundToAlign = false,
-                          unsigned AS = 0);
+                          LLVMContext &Context, bool RoundToAlign = false);
   SizeOffsetType compute(Value *V);
@@ -230,7 +229,7 @@ class ObjectSizeOffsetEvaluator
 public:
   ObjectSizeOffsetEvaluator(const DataLayout *TD, const TargetLibraryInfo *TLI,
-                            LLVMContext &Context, unsigned AS = 0);
+                            LLVMContext &Context);
   SizeOffsetEvalType compute(Value *V);
   bool knownSize(SizeOffsetEvalType SizeOffset) {
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index b5025d3318..235adca021 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -628,7 +628,7 @@ namespace llvm {
     /// getSizeOfExpr - Return an expression for sizeof on the given type.
     ///
-    const SCEV *getSizeOfExpr(Type *AllocTy, Type *IntPtrTy);
+    const SCEV *getSizeOfExpr(Type *AllocTy);
     /// getAlignOfExpr - Return an expression for alignof on the given type.
     ///
@@ -636,8 +636,7 @@ namespace llvm {
     /// getOffsetOfExpr - Return an expression for offsetof on the given field.
     ///
-    const SCEV *getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
-                                unsigned FieldNo);
+    const SCEV *getOffsetOfExpr(StructType *STy, unsigned FieldNo);
     /// getOffsetOfExpr - Return an expression for offsetof on the given field.
     ///
diff --git a/include/llvm/DataLayout.h b/include/llvm/DataLayout.h
index 987569d93a..e9d86784ad 100644
--- a/include/llvm/DataLayout.h
+++ b/include/llvm/DataLayout.h
@@ -258,14 +258,6 @@ public:
   unsigned getPointerSizeInBits(unsigned AS) const {
     return getPointerSize(AS) * 8;
   }
-  /// Layout pointer size, in bits, based on the type.
-  /// If this function is called with a pointer type, then
-  /// the type size of the pointer is returned.
-  /// If this function is called with a vector of pointers,
-  /// then the type size of the pointer is returned.
-  /// Otherwise the type sizeo f a default pointer is returned.
-  unsigned getPointerTypeSizeInBits(Type* Ty) const;
-
   /// Size examples:
   ///
   /// Type        SizeInBits  StoreSizeInBits  AllocSizeInBits[*]
@@ -343,7 +335,7 @@ public:
   /// getIntPtrType - Return an integer type with size at least as big as that
   /// of a pointer in the given address space.
-  IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace) const;
+  IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
   /// getIntPtrType - Return an integer (vector of integer) type with size at
   /// least as big as that of a pointer of the given pointer (vector of pointer)
diff --git a/include/llvm/InstrTypes.h b/include/llvm/InstrTypes.h
index 74e51ffa0f..da17f3b80d 100644
--- a/include/llvm/InstrTypes.h
+++ b/include/llvm/InstrTypes.h
@@ -17,7 +17,6 @@
 #define LLVM_INSTRUCTION_TYPES_H
 #include "llvm/Instruction.h"
-#include "llvm/DataLayout.h"
 #include "llvm/OperandTraits.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/ADT/Twine.h"
@@ -577,11 +576,6 @@ public:
     Type *IntPtrTy ///< Integer type corresponding to pointer
   ) const;
-  /// @brief Determine if this cast is a no-op cast.
-  bool isNoopCast(
-    const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
-  ) const;
-
   /// Determine how a pair of casts can be eliminated, if they can be at all.
   /// This is a helper function for both CastInst and ConstantExpr.
   /// @returns 0 if the CastInst pair can't be eliminated, otherwise
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index 5c804d877a..7173781e35 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -179,9 +179,8 @@ static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
 template<typename IRBuilderTy>
 Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
                      bool NoAssumptions = false) {
-  unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
   gep_type_iterator GTI = gep_type_begin(GEP);
-  Type *IntPtrTy = TD.getIntPtrType(GEP->getContext(), AS);
+  Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
   Value *Result = Constant::getNullValue(IntPtrTy);
   // If the GEP is inbounds, we know that none of the addressing operations will
@@ -189,6 +188,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
   bool isInBounds = cast<GEPOperator>(GEP)->isInBounds() && !NoAssumptions;
   // Build a mask for high order bits.
+  unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
   unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
   uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index de6d61d78b..146897ad67 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -41,7 +41,7 @@ using namespace llvm;
 // Constant Folding internal helper functions
 //===----------------------------------------------------------------------===//
-/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
+/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with 
 /// DataLayout. This always returns a non-null constant, but it may be a
 /// ConstantExpr if unfoldable.
 static Constant *FoldBitCast(Constant *C, Type *DestTy,
@@ -59,9 +59,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       return ConstantExpr::getBitCast(C, DestTy);
     unsigned NumSrcElts = CDV->getType()->getNumElements();
-
+    
     Type *SrcEltTy = CDV->getType()->getElementType();
-
+    
     // If the vector is a vector of floating point, convert it to vector of int
     // to simplify things.
     if (SrcEltTy->isFloatingPointTy()) {
@@ -72,7 +72,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       C = ConstantExpr::getBitCast(C, SrcIVTy);
       CDV = cast<ConstantDataVector>(C);
     }
-
+    
     // Now that we know that the input value is a vector of integers, just shift
     // and insert them into our result.
     unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
@@ -84,43 +84,43 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       else
         Result |= CDV->getElementAsInteger(i);
     }
-
+    
     return ConstantInt::get(IT, Result);
   }
-
+  
   // The code below only handles casts to vectors currently.
   VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
   if (DestVTy == 0)
     return ConstantExpr::getBitCast(C, DestTy);
-
+  
   // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
   // vector so the code below can handle it uniformly.
   if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
     Constant *Ops = C; // don't take the address of C!
     return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
   }
-
+  
   // If this is a bitcast from constant vector -> vector, fold it.
   if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
     return ConstantExpr::getBitCast(C, DestTy);
-
+  
   // If the element types match, VMCore can fold it.
   unsigned NumDstElt = DestVTy->getNumElements();
   unsigned NumSrcElt = C->getType()->getVectorNumElements();
   if (NumDstElt == NumSrcElt)
     return ConstantExpr::getBitCast(C, DestTy);
-
+  
   Type *SrcEltTy = C->getType()->getVectorElementType();
   Type *DstEltTy = DestVTy->getElementType();
-
-  // Otherwise, we're changing the number of elements in a vector, which
+  
+  // Otherwise, we're changing the number of elements in a vector, which 
   // requires endianness information to do the right thing. For example,
   //   bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
   // folds to (little endian):
   //   <4 x i32> <i32 0, i32 0, i32 1, i32 0>
   // and to (big endian):
   //   <4 x i32> <i32 0, i32 0, i32 0, i32 1>
-
+  
   // First thing is first. We only want to think about integer here, so if
   // we have something in FP form, recast it as integer.
   if (DstEltTy->isFloatingPointTy()) {
@@ -130,11 +130,11 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
       VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
     // Recursively handle this integer conversion, if possible.
     C = FoldBitCast(C, DestIVTy, TD);
-
+    
     // Finally, VMCore can handle this now that #elts line up.
     return ConstantExpr::getBitCast(C, DestTy);
   }
-
+  
   // Okay, we know the destination is integer, if the input is FP, convert
   // it to integer first.
   if (SrcEltTy->isFloatingPointTy()) {
@@ -148,13 +148,13 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
         !isa<ConstantDataVector>(C))
       return C;
   }
-
+  
   // Now we know that the input and output vectors are both integer vectors
   // of the same size, and that their #elements is not the same. Do the
   // conversion here, which depends on whether the input or output has
   // more elements.
   bool isLittleEndian = TD.isLittleEndian();
-
+  
   SmallVector<Constant*, 32> Result;
   if (NumDstElt < NumSrcElt) {
     // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
@@ -170,15 +170,15 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
         Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
         if (!Src)  // Reject constantexpr elements.
           return ConstantExpr::getBitCast(C, DestTy);
-
+        
         // Zero extend the element to the right size.
         Src = ConstantExpr::getZExt(Src, Elt->getType());
-
+        
         // Shift it to the right place, depending on endianness.
-        Src = ConstantExpr::getShl(Src,
+        Src = ConstantExpr::getShl(Src, 
                                    ConstantInt::get(Src->getType(), ShiftAmt));
         ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
-
+        
         // Mix it in.
         Elt = ConstantExpr::getOr(Elt, Src);
       }
@@ -186,30 +186,30 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
     }
     return ConstantVector::get(Result);
   }
-
+  
   // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
   unsigned Ratio = NumDstElt/NumSrcElt;
   unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
-
+  
   // Loop over each source value, expanding into multiple results.
   for (unsigned i = 0; i != NumSrcElt; ++i) {
     Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
     if (!Src)  // Reject constantexpr elements.
       return ConstantExpr::getBitCast(C, DestTy);
-
+    
     unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
     for (unsigned j = 0; j != Ratio; ++j) {
       // Shift the piece of the value into the right place, depending on
       // endianness.
-      Constant *Elt = ConstantExpr::getLShr(Src,
+      Constant *Elt = ConstantExpr::getLShr(Src, 
                                   ConstantInt::get(Src->getType(), ShiftAmt));
       ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
+      
       // Truncate and remember this piece.
       Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
     }
   }
-
+  
   return ConstantVector::get(Result);
 }
@@ -224,28 +224,28 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
     Offset = 0;
     return true;
   }
-
+  
   // Otherwise, if this isn't a constant expr, bail out.
   ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
   if (!CE) return false;
-
+  
   // Look through ptr->int and ptr->ptr casts.
   if (CE->getOpcode() == Instruction::PtrToInt ||
       CE->getOpcode() == Instruction::BitCast)
     return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
-
-  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
+  
+  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5) 
   if (CE->getOpcode() == Instruction::GetElementPtr) {
     // Cannot compute this if the element type of the pointer is missing size
     // info.
     if (!cast<PointerType>(CE->getOperand(0)->getType())
            ->getElementType()->isSized())
       return false;
-
+    
     // If the base isn't a global+constant, we aren't either.
     if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
       return false;
-
+    
     // Otherwise, add any offset that our operands provide.
     gep_type_iterator GTI = gep_type_begin(CE);
     for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
@@ -253,7 +253,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
       ConstantInt *CI = dyn_cast<ConstantInt>(*i);
       if (!CI) return false; // Index isn't a simple constant?
       if (CI->isZero()) continue; // Not adding anything.
-
+      
       if (StructType *ST = dyn_cast<StructType>(*GTI)) {
         // N = N + Offset
         Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
@@ -264,7 +264,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
     }
     return true;
   }
-
+  
   return false;
 }
@@ -277,27 +277,27 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                                const DataLayout &TD) {
   assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
          "Out of range access");
-
+  
   // If this element is zero or undefined, we can just return since *CurPtr is
   // zero initialized.
   if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
     return true;
-
+  
   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
     if (CI->getBitWidth() > 64 || (CI->getBitWidth() & 7) != 0)
       return false;
-
+    
     uint64_t Val = CI->getZExtValue();
     unsigned IntBytes = unsigned(CI->getBitWidth()/8);
-
+    
     for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
       CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));
       ++ByteOffset;
     }
     return true;
   }
-
+  
   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
     if (CFP->getType()->isDoubleTy()) {
       C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
@@ -309,13 +309,13 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
     }
     return false;
   }
-
+  
   if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
     const StructLayout *SL = TD.getStructLayout(CS->getType());
     unsigned Index = SL->getElementContainingOffset(ByteOffset);
     uint64_t CurEltOffset = SL->getElementOffset(Index);
     ByteOffset -= CurEltOffset;
-
+    
     while (1) {
       // If the element access is to the element itself and not to tail padding,
       // read the bytes from the element.
@@ -325,9 +325,9 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
           !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                               BytesLeft, TD))
         return false;
-
+      
       ++Index;
-
+      
       // Check to see if we read from the last struct element, if so we're done.
       if (Index == CS->getType()->getNumElements())
         return true;
@@ -375,11 +375,11 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
     }
     return true;
   }
-
+  
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
     if (CE->getOpcode() == Instruction::IntToPtr &&
-        CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType()))
-      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
+        CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
+      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr, 
                                 BytesLeft, TD);
   }
@@ -391,7 +391,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
                                                  const DataLayout &TD) {
   Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
   IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
-
+  
   // If this isn't an integer load we can't fold it directly.
   if (!IntType) {
     // If this is a float/double load, we can try folding it as an int32/64 load
@@ -415,15 +415,15 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
       return FoldBitCast(Res, LoadTy, TD);
     return 0;
   }
-
+  
   unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
   if (BytesLoaded > 32 || BytesLoaded == 0)
     return 0;
-
+  
   GlobalValue *GVal;
   int64_t Offset;
   if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
     return 0;
-
+  
   GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
       !GV->getInitializer()->getType()->isSized())
@@ -432,11 +432,11 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
   // If we're loading off the beginning of the global, some bytes may be valid,
   // but we don't try to handle this.
   if (Offset < 0) return 0;
-
+  
   // If we're not accessing anything in this constant, the result is undefined.
   if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType()))
     return UndefValue::get(IntType);
-
+  
   unsigned char RawBytes[32] = {0};
   if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes,
                           BytesLoaded, TD))
@@ -464,15 +464,15 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
   // If the loaded value isn't a constant expr, we can't handle it.
   ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
   if (!CE) return 0;
-
+  
   if (CE->getOpcode() == Instruction::GetElementPtr) {
     if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
       if (GV->isConstant() && GV->hasDefinitiveInitializer())
-        if (Constant *V =
+        if (Constant *V = 
              ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
           return V;
   }
-
+  
   // Instead of loading constant c string, use corresponding integer value
   // directly if string length is small enough.
   StringRef Str;
@@ -500,14 +500,14 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
         SingleChar = 0;
       StrVal = (StrVal << 8) | SingleChar;
     }
-
+    
     Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
     if (Ty->isFloatingPointTy())
       Res = ConstantExpr::getBitCast(Res, Ty);
     return Res;
    }
  }
-
+  
   // If this load comes from anywhere in a constant global, and if the global
   // is all undef or zero, we know what it loads.
   if (GlobalVariable *GV =
@@ -520,7 +520,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
       return UndefValue::get(ResTy);
     }
   }
-
+  
   // Try hard to fold loads from bitcasted strange and non-type-safe things. We
   // currently don't do any of this for big endian systems. It can be
   // generalized in the future if someone is interested.
@@ -531,7 +531,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
 static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
   if (LI->isVolatile()) return 0;
-
+  
   if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
     return ConstantFoldLoadFromConstPtr(C, TD);
@@ -540,23 +540,23 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
 /// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
 /// Attempt to symbolically evaluate the result of a binary operator merging
-/// these together. If target data info is available, it is provided as TD,
+/// these together. If target data info is available, it is provided as TD, 
 /// otherwise TD is null.
 static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                            Constant *Op1, const DataLayout *TD){
   // SROA
-
+  
   // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
   // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
   // bits.
-
-
+  
+  
   // If the constant expr is something like &A[123] - &A[4].f, fold this into a
   // constant. This happens frequently when iterating over a global array.
   if (Opc == Instruction::Sub && TD) {
     GlobalValue *GV1, *GV2;
     int64_t Offs1, Offs2;
-
+    
     if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
       if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
           GV1 == GV2) {
@@ -564,7 +564,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
         return ConstantInt::get(Op0->getType(), Offs1-Offs2);
       }
   }
-
+  
   return 0;
 }
@@ -575,7 +575,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
                                 Type *ResultTy, const DataLayout *TD,
                                 const TargetLibraryInfo *TLI) {
   if (!TD) return 0;
-  Type *IntPtrTy = TD->getIntPtrType(ResultTy);
+  Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
   bool Any = false;
   SmallVector<Constant*, 32> NewIdxs;
@@ -628,15 +628,14 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
   if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
       !Ptr->getType()->isPointerTy())
     return 0;
-
-  unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
-  Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext(), AS);
+