Diffstat (limited to 'lib/CodeGen/CGExprConstant.cpp')
-rw-r--r-- | lib/CodeGen/CGExprConstant.cpp | 270
1 file changed, 135 insertions, 135 deletions
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp index 593406f5d0..bea6d80b43 100644 --- a/lib/CodeGen/CGExprConstant.cpp +++ b/lib/CodeGen/CGExprConstant.cpp @@ -33,63 +33,63 @@ class VISIBILITY_HIDDEN ConstStructBuilder { CodeGenModule &CGM; CodeGenFunction *CGF; - bool Packed; + bool Packed; unsigned NextFieldOffsetInBytes; - + std::vector<llvm::Constant *> Elements; ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF) : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0) { } - bool AppendField(const FieldDecl *Field, uint64_t FieldOffset, + bool AppendField(const FieldDecl *Field, uint64_t FieldOffset, const Expr *InitExpr) { uint64_t FieldOffsetInBytes = FieldOffset / 8; - - assert(NextFieldOffsetInBytes <= FieldOffsetInBytes + + assert(NextFieldOffsetInBytes <= FieldOffsetInBytes && "Field offset mismatch!"); - + // Emit the field. llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF); if (!C) return false; unsigned FieldAlignment = getAlignment(C); - + // Round up the field offset to the alignment of the field type. - uint64_t AlignedNextFieldOffsetInBytes = + uint64_t AlignedNextFieldOffsetInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment); - + if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) { std::vector<llvm::Constant *> PackedElements; - + assert(!Packed && "Alignment is wrong even with a packed struct!"); - + // Convert the struct to a packed struct. uint64_t ElementOffsetInBytes = 0; - + for (unsigned i = 0, e = Elements.size(); i != e; ++i) { llvm::Constant *C = Elements[i]; - - unsigned ElementAlign = + + unsigned ElementAlign = CGM.getTargetData().getABITypeAlignment(C->getType()); - uint64_t AlignedElementOffsetInBytes = + uint64_t AlignedElementOffsetInBytes = llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign); - + if (AlignedElementOffsetInBytes > ElementOffsetInBytes) { // We need some padding. - uint64_t NumBytes = + uint64_t NumBytes = AlignedElementOffsetInBytes - ElementOffsetInBytes; - + const llvm::Type *Ty = llvm::Type::getInt8Ty(CGF->getLLVMContext()); - if (NumBytes > 1) + if (NumBytes > 1) Ty = llvm::ArrayType::get(Ty, NumBytes); - + llvm::Constant *Padding = llvm::Constant::getNullValue(Ty); PackedElements.push_back(Padding); ElementOffsetInBytes += getSizeInBytes(Padding); } - + PackedElements.push_back(C); ElementOffsetInBytes += getSizeInBytes(C); } @@ -105,51 +105,51 @@ class VISIBILITY_HIDDEN ConstStructBuilder { if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) { // We need to append padding. AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes); - + assert(NextFieldOffsetInBytes == FieldOffsetInBytes && "Did not add enough padding!"); - + AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes; } - + // Add the field. Elements.push_back(C); NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C); return true; } - - bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, + + bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, const Expr *InitExpr) { - llvm::ConstantInt *CI = - cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr, - Field->getType(), + llvm::ConstantInt *CI = + cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr, + Field->getType(), CGF)); // FIXME: Can this ever happen? if (!CI) return false; - + if (FieldOffset > NextFieldOffsetInBytes * 8) { // We need to add padding. 
- uint64_t NumBytes = - llvm::RoundUpToAlignment(FieldOffset - + uint64_t NumBytes = + llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBytes * 8, 8) / 8; - + AppendPadding(NumBytes); } - uint64_t FieldSize = + uint64_t FieldSize = Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue(); llvm::APInt FieldValue = CI->getValue(); - + // Promote the size of FieldValue if necessary // FIXME: This should never occur, but currently it can because initializer // constants are cast to bool, and because clang is not enforcing bitfield // width limits. if (FieldSize > FieldValue.getBitWidth()) FieldValue.zext(FieldSize); - + // Truncate the size of FieldValue to the bit field size. if (FieldSize < FieldValue.getBitWidth()) FieldValue.trunc(FieldSize); @@ -158,18 +158,18 @@ class VISIBILITY_HIDDEN ConstStructBuilder { // Either part of the field or the entire field can go into the previous // byte. assert(!Elements.empty() && "Elements can't be empty!"); - - unsigned BitsInPreviousByte = + + unsigned BitsInPreviousByte = NextFieldOffsetInBytes * 8 - FieldOffset; - - bool FitsCompletelyInPreviousByte = + + bool FitsCompletelyInPreviousByte = BitsInPreviousByte >= FieldValue.getBitWidth(); - + llvm::APInt Tmp = FieldValue; - + if (!FitsCompletelyInPreviousByte) { unsigned NewFieldWidth = FieldSize - BitsInPreviousByte; - + if (CGM.getTargetData().isBigEndian()) { Tmp = Tmp.lshr(NewFieldWidth); Tmp.trunc(BitsInPreviousByte); @@ -184,7 +184,7 @@ class VISIBILITY_HIDDEN ConstStructBuilder { FieldValue.trunc(NewFieldWidth); } } - + Tmp.zext(8); if (CGM.getTargetData().isBigEndian()) { if (FitsCompletelyInPreviousByte) @@ -196,14 +196,14 @@ class VISIBILITY_HIDDEN ConstStructBuilder { // Or in the bits that go into the previous byte. Tmp |= cast<llvm::ConstantInt>(Elements.back())->getValue(); Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp); - + if (FitsCompletelyInPreviousByte) return true; } - + while (FieldValue.getBitWidth() > 8) { llvm::APInt Tmp; - + if (CGM.getTargetData().isBigEndian()) { // We want the high bits. Tmp = FieldValue; @@ -213,13 +213,13 @@ class VISIBILITY_HIDDEN ConstStructBuilder { // We want the low bits. 
Tmp = FieldValue; Tmp.trunc(8); - + FieldValue = FieldValue.lshr(8); } - + Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp)); NextFieldOffsetInBytes++; - + FieldValue.trunc(FieldValue.getBitWidth() - 8); } @@ -231,10 +231,10 @@ class VISIBILITY_HIDDEN ConstStructBuilder { if (FieldValue.getBitWidth() < 8) { if (CGM.getTargetData().isBigEndian()) { unsigned BitWidth = FieldValue.getBitWidth(); - + FieldValue.zext(8); FieldValue = FieldValue << (8 - BitWidth); - } else + } else FieldValue.zext(8); } @@ -244,19 +244,19 @@ class VISIBILITY_HIDDEN ConstStructBuilder { NextFieldOffsetInBytes++; return true; } - + void AppendPadding(uint64_t NumBytes) { if (!NumBytes) return; const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext()); - if (NumBytes > 1) + if (NumBytes > 1) Ty = llvm::ArrayType::get(Ty, NumBytes); llvm::Constant *C = llvm::Constant::getNullValue(Ty); Elements.push_back(C); assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!"); - + NextFieldOffsetInBytes += getSizeInBytes(C); } @@ -265,19 +265,19 @@ class VISIBILITY_HIDDEN ConstStructBuilder { uint64_t RecordSizeInBytes = RecordSize / 8; assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!"); - + unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes; AppendPadding(NumPadBytes); } - + bool Build(InitListExpr *ILE) { RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl(); const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); - + unsigned FieldNo = 0; unsigned ElementNo = 0; - for (RecordDecl::field_iterator Field = RD->field_begin(), - FieldEnd = RD->field_end(); + for (RecordDecl::field_iterator Field = RD->field_begin(), + FieldEnd = RD->field_end(); ElementNo < ILE->getNumInits() && Field != FieldEnd; ++Field, ++FieldNo) { if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field) @@ -286,7 +286,7 @@ class VISIBILITY_HIDDEN ConstStructBuilder { if (Field->isBitField()) { if (!Field->getIdentifier()) continue; - + if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo), ILE->getInit(ElementNo))) return false; @@ -295,63 +295,63 @@ class VISIBILITY_HIDDEN ConstStructBuilder { ILE->getInit(ElementNo))) return false; } - + ElementNo++; } - + uint64_t LayoutSizeInBytes = Layout.getSize() / 8; - + if (NextFieldOffsetInBytes > LayoutSizeInBytes) { // If the struct is bigger than the size of the record type, // we must have a flexible array member at the end. assert(RD->hasFlexibleArrayMember() && "Must have flexible array member if struct is bigger than type!"); - + // No tail padding is necessary. return true; } - + // Append tail padding if necessary. 
AppendTailPadding(Layout.getSize()); - - assert(Layout.getSize() / 8 == NextFieldOffsetInBytes && + + assert(Layout.getSize() / 8 == NextFieldOffsetInBytes && "Tail padding mismatch!"); - + return true; } - + unsigned getAlignment(const llvm::Constant *C) const { if (Packed) return 1; - + return CGM.getTargetData().getABITypeAlignment(C->getType()); } - + uint64_t getSizeInBytes(const llvm::Constant *C) const { return CGM.getTargetData().getTypeAllocSize(C->getType()); } - + public: static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) { ConstStructBuilder Builder(CGM, CGF); - + if (!Builder.Build(ILE)) return 0; - - llvm::Constant *Result = + + llvm::Constant *Result = llvm::ConstantStruct::get(CGM.getLLVMContext(), Builder.Elements, Builder.Packed); assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes, - Builder.getAlignment(Result)) == + Builder.getAlignment(Result)) == Builder.getSizeInBytes(Result) && "Size mismatch!"); return Result; } }; - -class VISIBILITY_HIDDEN ConstExprEmitter : + +class VISIBILITY_HIDDEN ConstExprEmitter : public StmtVisitor<ConstExprEmitter, llvm::Constant*> { CodeGenModule &CGM; CodeGenFunction *CGF; @@ -360,23 +360,23 @@ public: ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf) : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) { } - + //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// - + llvm::Constant *VisitStmt(Stmt *S) { return 0; } - - llvm::Constant *VisitParenExpr(ParenExpr *PE) { - return Visit(PE->getSubExpr()); + + llvm::Constant *VisitParenExpr(ParenExpr *PE) { + return Visit(PE->getSubExpr()); } - + llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { return Visit(E->getInitializer()); } - + llvm::Constant *VisitCastExpr(CastExpr* E) { switch (E->getCastKind()) { case CastExpr::CK_ToUnion: { @@ -386,11 +386,11 @@ public: const llvm::Type *Ty = ConvertType(E->getType()); Expr *SubExpr = E->getSubExpr(); - llvm::Constant *C = + llvm::Constant *C = CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF); if (!C) return 0; - + // Build a struct with the union sub-element as the first member, // and padded to the appropriate size std::vector<llvm::Constant*> Elts; @@ -399,7 +399,7 @@ public: Types.push_back(C->getType()); unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType()); unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty); - + assert(CurSize <= TotalSize && "Union size mismatch!"); if (unsigned NumPadBytes = TotalSize - CurSize) { const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext); @@ -409,7 +409,7 @@ public: Elts.push_back(llvm::Constant::getNullValue(Ty)); Types.push_back(Ty); } - + llvm::StructType* STy = llvm::StructType::get(C->getType()->getContext(), Types, false); return llvm::ConstantStruct::get(STy, Elts); @@ -438,7 +438,7 @@ public: unsigned NumInitElements = ILE->getNumInits(); // FIXME: Check for wide strings // FIXME: Check for NumInitElements exactly equal to 1?? 
- if (NumInitElements > 0 && + if (NumInitElements > 0 && (isa<StringLiteral>(ILE->getInit(0)) || isa<ObjCEncodeExpr>(ILE->getInit(0))) && ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType()) @@ -446,7 +446,7 @@ public: const llvm::Type *ElemTy = AType->getElementType(); unsigned NumElements = AType->getNumElements(); - // Initialising an array requires us to automatically + // Initialising an array requires us to automatically // initialise any elements that have not been initialised explicitly unsigned NumInitableElts = std::min(NumInitElements, NumElements); @@ -472,18 +472,18 @@ public: std::vector<const llvm::Type*> Types; for (unsigned i = 0; i < Elts.size(); ++i) Types.push_back(Elts[i]->getType()); - const llvm::StructType *SType = llvm::StructType::get(AType->getContext(), + const llvm::StructType *SType = llvm::StructType::get(AType->getContext(), Types, true); return llvm::ConstantStruct::get(SType, Elts); } - return llvm::ConstantArray::get(AType, Elts); + return llvm::ConstantArray::get(AType, Elts); } llvm::Constant *EmitStructInitialization(InitListExpr *ILE) { return ConstStructBuilder::BuildStruct(CGM, CGF, ILE); } - + llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) { return ConstStructBuilder::BuildStruct(CGM, CGF, ILE); } @@ -511,13 +511,13 @@ public: for (; i < NumElements; ++i) Elts.push_back(llvm::Constant::getNullValue(ElemTy)); - return llvm::ConstantVector::get(VType, Elts); + return llvm::ConstantVector::get(VType, Elts); } - + llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) { return CGM.EmitNullConstant(E->getType()); } - + llvm::Constant *VisitInitListExpr(InitListExpr *ILE) { if (ILE->getType()->isScalarType()) { // We have a scalar in braces. Just use the first element. @@ -527,7 +527,7 @@ public: } return CGM.EmitNullConstant(ILE->getType()); } - + if (ILE->getType()->isArrayType()) return EmitArrayInitialization(ILE); @@ -548,7 +548,7 @@ public: llvm::Constant *VisitStringLiteral(StringLiteral *E) { assert(!E->getType()->isPointerType() && "Strings are always arrays"); - + // This must be a string initializing an array in a static initializer. // Don't emit it as the address of the string, emit the string data itself // as an inline array. @@ -563,13 +563,13 @@ public: std::string Str; CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str); const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType()); - + // Resize the string to the right size, adding zeros at the end, or // truncating as needed. Str.resize(CAT->getSize().getZExtValue(), '\0'); return llvm::ConstantArray::get(VMContext, Str, false); } - + llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) { return Visit(E->getSubExpr()); } @@ -597,14 +597,14 @@ public: E->getType().getAddressSpace()); return C; } - case Expr::DeclRefExprClass: + case Expr::DeclRefExprClass: case Expr::QualifiedDeclRefExprClass: { NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl(); if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl)) return CGM.GetAddrOfFunction(GlobalDecl(FD)); if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) { // We can never refer to a variable with local storage. - if (!VD->hasLocalStorage()) { + if (!VD->hasLocalStorage()) { if (VD->isFileVarDecl() || VD->hasExternalStorage()) return CGM.GetAddrOfGlobalVar(VD); else if (VD->isBlockVarDecl()) { @@ -627,10 +627,10 @@ public: case Expr::PredefinedExprClass: { // __func__/__FUNCTION__ -> "". __PRETTY_FUNCTION__ -> "top level". 
std::string Str; - if (cast<PredefinedExpr>(E)->getIdentType() == + if (cast<PredefinedExpr>(E)->getIdentType() == PredefinedExpr::PrettyFunction) Str = "top level"; - + return CGM.GetAddrOfConstantCString(Str, ".tmp"); } case Expr::AddrLabelExprClass: { @@ -643,7 +643,7 @@ public: } case Expr::CallExprClass: { CallExpr* CE = cast<CallExpr>(E); - if (CE->isBuiltinCall(CGM.getContext()) != + if (CE->isBuiltinCall(CGM.getContext()) != Builtin::BI__builtin___CFStringMakeConstantString) break; const Expr *Arg = CE->getArg(0)->IgnoreParenCasts(); @@ -665,23 +665,23 @@ public: return 0; } }; - + } // end anonymous namespace. llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, QualType DestType, CodeGenFunction *CGF) { Expr::EvalResult Result; - + bool Success = false; - + if (DestType->isReferenceType()) Success = E->EvaluateAsLValue(Result, Context); - else + else Success = E->Evaluate(Result, Context); - + if (Success) { - assert(!Result.HasSideEffects && + assert(!Result.HasSideEffects && "Constant expr should not have any side effects!"); switch (Result.Val.getKind()) { case APValue::Uninitialized: @@ -689,17 +689,17 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, return 0; case APValue::LValue: { const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType); - llvm::Constant *Offset = - llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), + llvm::Constant *Offset = + llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), Result.Val.getLValueOffset()); - + llvm::Constant *C; if (const Expr *LVBase = Result.Val.getLValueBase()) { C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase)); // Apply offset if necessary. if (!Offset->isNullValue()) { - const llvm::Type *Type = + const llvm::Type *Type = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type); Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1); @@ -728,9 +728,9 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, } } case APValue::Int: { - llvm::Constant *C = llvm::ConstantInt::get(VMContext, + llvm::Constant *C = llvm::ConstantInt::get(VMContext, Result.Val.getInt()); - + if (C->getType() == llvm::Type::getInt1Ty(VMContext)) { const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType()); C = llvm::ConstantExpr::getZExt(C, BoolTy); @@ -739,30 +739,30 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, } case APValue::ComplexInt: { llvm::Constant *Complex[2]; - + Complex[0] = llvm::ConstantInt::get(VMContext, Result.Val.getComplexIntReal()); - Complex[1] = llvm::ConstantInt::get(VMContext, + Complex[1] = llvm::ConstantInt::get(VMContext, Result.Val.getComplexIntImag()); - + return llvm::ConstantStruct::get(VMContext, Complex, 2); } case APValue::Float: return llvm::ConstantFP::get(VMContext, Result.Val.getFloat()); case APValue::ComplexFloat: { llvm::Constant *Complex[2]; - - Complex[0] = llvm::ConstantFP::get(VMContext, + + Complex[0] = llvm::ConstantFP::get(VMContext, Result.Val.getComplexFloatReal()); Complex[1] = llvm::ConstantFP::get(VMContext, Result.Val.getComplexFloatImag()); - + return llvm::ConstantStruct::get(VMContext, Complex, 2); } case APValue::Vector: { llvm::SmallVector<llvm::Constant *, 4> Inits; unsigned NumElts = Result.Val.getVectorLength(); - + for (unsigned i = 0; i != NumElts; ++i) { APValue &Elt = Result.Val.getVectorElt(i); if (Elt.isInt()) @@ -787,9 +787,9 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { // No need 
to check for member pointers when not compiling C++. if (!getContext().getLangOptions().CPlusPlus) return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T)); - + if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) { - + QualType ElementTy = CAT->getElementType(); // FIXME: Handle arrays of structs that contain member pointers. @@ -799,8 +799,8 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { std::vector<llvm::Constant *> Array(NumElements); for (uint64_t i = 0; i != NumElements; ++i) Array[i] = Element; - - const llvm::ArrayType *ATy = + + const llvm::ArrayType *ATy = cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T)); return llvm::ConstantArray::get(ATy, Array); } @@ -808,19 +808,19 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { if (const RecordType *RT = T->getAs<RecordType>()) { const RecordDecl *RD = RT->getDecl(); - // FIXME: It would be better if there was a way to explicitly compute the + // FIXME: It would be better if there was a way to explicitly compute the // record layout instead of converting to a type. Types.ConvertTagDeclType(RD); - + const CGRecordLayout &Layout = Types.getCGRecordLayout(RD); if (Layout.containsMemberPointer()) { assert(0 && "FIXME: No support for structs with member pointers yet!"); } } - + // FIXME: Handle structs that contain member pointers. - if (T->isMemberPointerType()) + if (T->isMemberPointerType()) return llvm::Constant::getAllOnesValue(getTypes().ConvertTypeForMem(T)); - + return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T)); } |
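
Most of the diff touches ConstStructBuilder, which builds a constant struct one field at a time: it rounds the running byte offset up to the field's ABI alignment, emits an explicit i8 or [N x i8] null constant whenever the AST record layout places the field even further out, and converts everything emitted so far to a packed LLVM struct when natural alignment would overshoot the AST offset. The following is a minimal standalone sketch of that offset bookkeeping only; StructLayoutModel, FieldInfo and the local RoundUpToAlignment helper are illustrative stand-ins, not clang or LLVM APIs, and the packed conversion is reduced to a flag.

// Standalone model of ConstStructBuilder's offset bookkeeping.
// Assumption: plain byte sizes/alignments replace llvm::Constant elements
// and llvm::TargetData queries.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

static uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) {
  // Same contract as llvm::RoundUpToAlignment(Value, Align).
  return (Value + Align - 1) / Align * Align;
}

struct FieldInfo {
  uint64_t SizeInBytes;
  uint64_t AlignInBytes;   // ABI alignment of the field's LLVM type
  uint64_t OffsetInBytes;  // from the ASTRecordLayout (bit offset / 8)
};

class StructLayoutModel {
  bool Packed = false;
  uint64_t NextFieldOffsetInBytes = 0;
  std::vector<uint64_t> ExplicitPadding; // bytes of i8/[N x i8] padding emitted

public:
  void AppendField(const FieldInfo &F) {
    assert(NextFieldOffsetInBytes <= F.OffsetInBytes &&
           "Field offset mismatch!");

    uint64_t Align = Packed ? 1 : F.AlignInBytes;
    uint64_t AlignedNextOffset =
        RoundUpToAlignment(NextFieldOffsetInBytes, Align);

    if (AlignedNextOffset > F.OffsetInBytes) {
      // Natural alignment would place the field past its AST offset; the real
      // builder converts the elements emitted so far into a packed struct with
      // explicit padding.  Modeled here by dropping to 1-byte alignment.
      Packed = true;
      AlignedNextOffset = NextFieldOffsetInBytes;
    }

    if (AlignedNextOffset < F.OffsetInBytes) {
      // Gap that alignment alone will not produce: record explicit padding
      // (an i8 or [N x i8] null constant in the real code).
      ExplicitPadding.push_back(F.OffsetInBytes - NextFieldOffsetInBytes);
      NextFieldOffsetInBytes = F.OffsetInBytes;
      AlignedNextOffset = NextFieldOffsetInBytes;
    }

    NextFieldOffsetInBytes = AlignedNextOffset + F.SizeInBytes;
  }

  void AppendTailPadding(uint64_t RecordSizeInBytes) {
    // The real Build() skips this when a flexible array member made the
    // initializer bigger than the record type.
    assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
    if (uint64_t Pad = RecordSizeInBytes - NextFieldOffsetInBytes)
      ExplicitPadding.push_back(Pad);
    NextFieldOffsetInBytes = RecordSizeInBytes;
  }

  uint64_t size() const { return NextFieldOffsetInBytes; }
};

int main() {
  // Roughly: struct { char c; int i __attribute__((aligned(16))); }
  // 'i' sits at byte 16, further than i32's ABI alignment would place it.
  StructLayoutModel M;
  M.AppendField({/*Size=*/1, /*Align=*/1, /*Offset=*/0});  // c
  M.AppendField({/*Size=*/4, /*Align=*/4, /*Offset=*/16}); // i -> 15 pad bytes
  M.AppendTailPadding(32);                                 // record size
  std::printf("model size = %llu bytes\n", (unsigned long long)M.size());
}

On this example the model records 15 bytes of explicit padding after the first field and 12 bytes of tail padding, ending at the 32-byte record size; that should correspond to a constant of type { i8, [15 x i8], i32, [12 x i8] } from the real builder.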
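
AppendBitField does the bit-level work by hand: it ORs the first bits of a bitfield into the last byte already emitted, then peels the remaining value off eight bits at a time, with the shift directions swapped between little- and big-endian targets. The sketch below models only the little-endian result as a plain bit scatter into a byte vector; WriteBitField and the uint64_t value are simplifications standing in for llvm::APInt and the ConstantInt elements the builder actually appends.

// Little-endian-only model of the bit scattering done by AppendBitField.
// Assumption: the value fits in 64 bits; the real code uses llvm::APInt and
// also implements the mirrored big-endian ordering.
#include <cstdint>
#include <cstdio>
#include <vector>

void WriteBitField(std::vector<uint8_t> &Bytes, uint64_t BitOffset,
                   unsigned BitWidth, uint64_t Value) {
  for (unsigned i = 0; i != BitWidth; ++i) {
    uint64_t Bit = BitOffset + i;
    uint64_t Byte = Bit / 8;
    if (Byte >= Bytes.size())
      Bytes.resize(Byte + 1, 0);  // fresh zero bytes double as padding
    if ((Value >> i) & 1)
      Bytes[Byte] |= uint8_t(1u << (Bit % 8)); // OR into the existing byte,
                                               // like Elements.back() |= Tmp
  }
}

int main() {
  // struct { unsigned a : 3; unsigned b : 10; } with a = 5, b = 0x2AB:
  std::vector<uint8_t> Bytes;
  WriteBitField(Bytes, /*BitOffset=*/0, /*BitWidth=*/3, 5);
  WriteBitField(Bytes, /*BitOffset=*/3, /*BitWidth=*/10, 0x2AB);
  for (uint8_t B : Bytes)
    std::printf("%02x ", B);  // prints: 5d 15
  std::printf("\n");
}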
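
Two separate places in the diff use the same padding idiom: AppendPadding and the CK_ToUnion case both emit a single i8 when one pad byte is needed and an [N x i8] array otherwise, and the union cast wraps the initializer in an anonymous { element, padding } struct sized to the whole union. A small illustrative helper (DescribePadding is not a clang function) makes the rule explicit:

// Sketch of the padding-type choice shared by AppendPadding and CK_ToUnion.
#include <cstdint>
#include <cstdio>
#include <string>

std::string DescribePadding(uint64_t NumBytes) {
  if (NumBytes == 0)
    return "";                    // no padding element is emitted at all
  if (NumBytes == 1)
    return "i8";                  // Ty = llvm::Type::getInt8Ty(...)
  return "[" + std::to_string(NumBytes) + " x i8]"; // ArrayType::get(i8, N)
}

int main() {
  // union { char c; double d; } initialized through its 'char' member:
  uint64_t CurSize = 1, TotalSize = 8;  // getTypeAllocSize of member / union
  std::printf("{ i8, %s }\n", DescribePadding(TotalSize - CurSize).c_str());
  // prints: { i8, [7 x i8] }
}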