author | Anton Korobeynikov <asl@math.spbu.ru> | 2009-06-05 22:08:42 +0000
---|---|---
committer | Anton Korobeynikov <asl@math.spbu.ru> | 2009-06-05 22:08:42 +0000
commit | c4a59eb306efeb4bffa3cefecd1e6392fc5c4144 (patch) |
tree | b68a1188634b6b223753044144b941ba425da5b6 /lib/CodeGen/TargetABIInfo.cpp |
parent | acebb397fa5d63835a0de9cee144987057ec1333 (diff) |
Factor out TargetABIInfo stuff into separate file. No functionality change.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@72962 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/CodeGen/TargetABIInfo.cpp')
-rw-r--r-- | lib/CodeGen/TargetABIInfo.cpp | 1379 |
1 file changed, 1379 insertions, 0 deletions
diff --git a/lib/CodeGen/TargetABIInfo.cpp b/lib/CodeGen/TargetABIInfo.cpp new file mode 100644 index 0000000000..573ffed10a --- /dev/null +++ b/lib/CodeGen/TargetABIInfo.cpp @@ -0,0 +1,1379 @@ +//===---- TargetABIInfo.cpp - Encapsulate target ABI details ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// These classes wrap the information about a call or function +// definition used to handle ABI compliancy. +// +//===----------------------------------------------------------------------===// + +#include "ABIInfo.h" +#include "CodeGenFunction.h" +#include "clang/AST/RecordLayout.h" +#include "llvm/Type.h" + +using namespace clang; +using namespace CodeGen; + +ABIInfo::~ABIInfo() {} + +void ABIArgInfo::dump() const { + fprintf(stderr, "(ABIArgInfo Kind="); + switch (TheKind) { + case Direct: + fprintf(stderr, "Direct"); + break; + case Ignore: + fprintf(stderr, "Ignore"); + break; + case Coerce: + fprintf(stderr, "Coerce Type="); + getCoerceToType()->print(llvm::errs()); + break; + case Indirect: + fprintf(stderr, "Indirect Align=%d", getIndirectAlign()); + break; + case Expand: + fprintf(stderr, "Expand"); + break; + } + fprintf(stderr, ")\n"); +} + +static bool isEmptyRecord(ASTContext &Context, QualType T); + +/// isEmptyField - Return true iff a the field is "empty", that is it +/// is an unnamed bit-field or an (array of) empty record(s). +static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) { + if (FD->isUnnamedBitfield()) + return true; + + QualType FT = FD->getType(); + // Constant arrays of empty records count as empty, strip them off. + while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) + FT = AT->getElementType(); + + return isEmptyRecord(Context, FT); +} + +/// isEmptyRecord - Return true iff a structure contains only empty +/// fields. Note that a structure with a flexible array member is not +/// considered empty. +static bool isEmptyRecord(ASTContext &Context, QualType T) { + const RecordType *RT = T->getAsRecordType(); + if (!RT) + return 0; + const RecordDecl *RD = RT->getDecl(); + if (RD->hasFlexibleArrayMember()) + return false; + for (RecordDecl::field_iterator i = RD->field_begin(Context), + e = RD->field_end(Context); i != e; ++i) + if (!isEmptyField(Context, *i)) + return false; + return true; +} + +/// isSingleElementStruct - Determine if a structure is a "single +/// element struct", i.e. it has exactly one non-empty field or +/// exactly one field which is itself a single element +/// struct. Structures with flexible array members are never +/// considered single element structs. +/// +/// \return The field declaration for the single non-empty field, if +/// it exists. +static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { + const RecordType *RT = T->getAsStructureType(); + if (!RT) + return 0; + + const RecordDecl *RD = RT->getDecl(); + if (RD->hasFlexibleArrayMember()) + return 0; + + const Type *Found = 0; + for (RecordDecl::field_iterator i = RD->field_begin(Context), + e = RD->field_end(Context); i != e; ++i) { + const FieldDecl *FD = *i; + QualType FT = FD->getType(); + + // Ignore empty fields. + if (isEmptyField(Context, FD)) + continue; + + // If we already found an element then this isn't a single-element + // struct. 
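// --- Aside, not part of the commit: hand-derived examples of how the
// predicates above (isEmptyField, isEmptyRecord, isSingleElementStruct)
// classify a few record types.
struct Empty { };                    // isEmptyRecord -> true
struct Pad   { int : 0; };           // only an unnamed bit-field -> empty
struct S1    { float f; };           // single-element struct: float
struct S2    { Empty e; double d; }; // empty field ignored -> double
struct S3    { double d[1]; };       // size-1 array unwrapped -> double
struct S4    { int a, b; };          // two non-empty fields -> not single-element
struct Flex  { int n; int data[]; }; // flexible array member -> rejected
// --- End aside.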
+ if (Found) + return 0; + + // Treat single element arrays as the element. + while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { + if (AT->getSize().getZExtValue() != 1) + break; + FT = AT->getElementType(); + } + + if (!CodeGenFunction::hasAggregateLLVMType(FT)) { + Found = FT.getTypePtr(); + } else { + Found = isSingleElementStruct(FT, Context); + if (!Found) + return 0; + } + } + + return Found; +} + +static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { + if (!Ty->getAsBuiltinType() && !Ty->isPointerType()) + return false; + + uint64_t Size = Context.getTypeSize(Ty); + return Size == 32 || Size == 64; +} + +static bool areAllFields32Or64BitBasicType(const RecordDecl *RD, + ASTContext &Context) { + for (RecordDecl::field_iterator i = RD->field_begin(Context), + e = RD->field_end(Context); i != e; ++i) { + const FieldDecl *FD = *i; + + if (!is32Or64BitBasicType(FD->getType(), Context)) + return false; + + // FIXME: Reject bit-fields wholesale; there are two problems, we don't know + // how to expand them yet, and the predicate for telling if a bitfield still + // counts as "basic" is more complicated than what we were doing previously. + if (FD->isBitField()) + return false; + } + + return true; +} + +namespace { +/// DefaultABIInfo - The default implementation for ABI specific +/// details. This implementation provides information which results in +/// self-consistent and sensible LLVM IR generation, but does not +/// conform to any particular ABI. +class DefaultABIInfo : public ABIInfo { + ABIArgInfo classifyReturnType(QualType RetTy, + ASTContext &Context) const; + + ABIArgInfo classifyArgumentType(QualType RetTy, + ASTContext &Context) const; + + virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); + for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it) + it->info = classifyArgumentType(it->type, Context); + } + + virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const; +}; + +/// X86_32ABIInfo - The X86-32 ABI information. +class X86_32ABIInfo : public ABIInfo { + ASTContext &Context; + bool IsDarwin; + + static bool isRegisterSize(unsigned Size) { + return (Size == 8 || Size == 16 || Size == 32 || Size == 64); + } + + static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context); + +public: + ABIArgInfo classifyReturnType(QualType RetTy, + ASTContext &Context) const; + + ABIArgInfo classifyArgumentType(QualType RetTy, + ASTContext &Context) const; + + virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); + for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it) + it->info = classifyArgumentType(it->type, Context); + } + + virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const; + + X86_32ABIInfo(ASTContext &Context, bool d) + : ABIInfo(), Context(Context), IsDarwin(d) {} +}; +} + + +/// shouldReturnTypeInRegister - Determine if the given type should be +/// passed in a register (for the Darwin ABI). +bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, + ASTContext &Context) { + uint64_t Size = Context.getTypeSize(Ty); + + // Type must be register sized. 
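// --- Aside, not part of the commit: areAllFields32Or64BitBasicType feeds
// the x86-32 "expand" rule further down; hand-derived results for a few
// argument types.
struct A { int x; float y; };         // all 32-bit basics            -> true
struct B { double d; long long ll; }; // all 64-bit basics            -> true
struct C { char c; };                 // char is 8 bits               -> false
struct D { int x : 5; };              // bit-field, rejected wholesale -> false
struct E { int *p; short s; };        // short is 16 bits             -> false
// --- End aside.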
+ if (!isRegisterSize(Size)) + return false; + + if (Ty->isVectorType()) { + // 64- and 128- bit vectors inside structures are not returned in + // registers. + if (Size == 64 || Size == 128) + return false; + + return true; + } + + // If this is a builtin, pointer, or complex type, it is ok. + if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType()) + return true; + + // Arrays are treated like records. + if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) + return shouldReturnTypeInRegister(AT->getElementType(), Context); + + // Otherwise, it must be a record type. + const RecordType *RT = Ty->getAsRecordType(); + if (!RT) return false; + + // Structure types are passed in register if all fields would be + // passed in a register. + for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context), + e = RT->getDecl()->field_end(Context); i != e; ++i) { + const FieldDecl *FD = *i; + + // Empty fields are ignored. + if (isEmptyField(Context, FD)) + continue; + + // Check fields recursively. + if (!shouldReturnTypeInRegister(FD->getType(), Context)) + return false; + } + + return true; +} + +ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, + ASTContext &Context) const { + if (RetTy->isVoidType()) { + return ABIArgInfo::getIgnore(); + } else if (const VectorType *VT = RetTy->getAsVectorType()) { + // On Darwin, some vectors are returned in registers. + if (IsDarwin) { + uint64_t Size = Context.getTypeSize(RetTy); + + // 128-bit vectors are a special case; they are returned in + // registers and we need to make sure to pick a type the LLVM + // backend will like. + if (Size == 128) + return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty, + 2)); + + // Always return in register if it fits in a general purpose + // register, or if it is 64 bits and has a single element. + if ((Size == 8 || Size == 16 || Size == 32) || + (Size == 64 && VT->getNumElements() == 1)) + return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size)); + + return ABIArgInfo::getIndirect(0); + } + + return ABIArgInfo::getDirect(); + } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { + // Structures with flexible arrays are always indirect. + if (const RecordType *RT = RetTy->getAsStructureType()) + if (RT->getDecl()->hasFlexibleArrayMember()) + return ABIArgInfo::getIndirect(0); + + // Outside of Darwin, structs and unions are always indirect. + if (!IsDarwin && !RetTy->isAnyComplexType()) + return ABIArgInfo::getIndirect(0); + + // Classify "single element" structs as their element type. + if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) { + if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) { + if (BT->isIntegerType()) { + // We need to use the size of the structure, padding + // bit-fields can adjust that to be larger than the single + // element type. 
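// --- Aside, not part of the commit: my reading of the Darwin x86-32
// return rules above applied to a few types (hand-derived, not verified
// compiler output).
struct RF { float f; };    // single-element struct        -> coerced to float
struct RP { char *p; };    // single-element, pointer      -> coerced to i8*
struct RI { short a, b; }; // 32 bits, register-sized      -> coerced to i32
struct RL { int a, b; };   // 64 bits, register-sized      -> coerced to i64
struct RC { char c[3]; };  // 24 bits, not register-sized  -> indirect (sret)
// --- End aside.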
+ uint64_t Size = Context.getTypeSize(RetTy); + return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size)); + } else if (BT->getKind() == BuiltinType::Float) { + assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) && + "Unexpect single element structure size!"); + return ABIArgInfo::getCoerce(llvm::Type::FloatTy); + } else if (BT->getKind() == BuiltinType::Double) { + assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) && + "Unexpect single element structure size!"); + return ABIArgInfo::getCoerce(llvm::Type::DoubleTy); + } + } else if (SeltTy->isPointerType()) { + // FIXME: It would be really nice if this could come out as the proper + // pointer type. + llvm::Type *PtrTy = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + return ABIArgInfo::getCoerce(PtrTy); + } else if (SeltTy->isVectorType()) { + // 64- and 128-bit vectors are never returned in a + // register when inside a structure. + uint64_t Size = Context.getTypeSize(RetTy); + if (Size == 64 || Size == 128) + return ABIArgInfo::getIndirect(0); + + return classifyReturnType(QualType(SeltTy, 0), Context); + } + } + + // Small structures which are register sized are generally returned + // in a register. + if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) { + uint64_t Size = Context.getTypeSize(RetTy); + return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size)); + } + + return ABIArgInfo::getIndirect(0); + } else { + return ABIArgInfo::getDirect(); + } +} + +ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, + ASTContext &Context) const { + // FIXME: Set alignment on indirect arguments. + if (CodeGenFunction::hasAggregateLLVMType(Ty)) { + // Structures with flexible arrays are always indirect. + if (const RecordType *RT = Ty->getAsStructureType()) + if (RT->getDecl()->hasFlexibleArrayMember()) + return ABIArgInfo::getIndirect(0); + + // Ignore empty structs. + uint64_t Size = Context.getTypeSize(Ty); + if (Ty->isStructureType() && Size == 0) + return ABIArgInfo::getIgnore(); + + // Expand structs with size <= 128-bits which consist only of + // basic types (int, long long, float, double, xxx*). This is + // non-recursive and does not ignore empty fields. + if (const RecordType *RT = Ty->getAsStructureType()) { + if (Context.getTypeSize(Ty) <= 4*32 && + areAllFields32Or64BitBasicType(RT->getDecl(), Context)) + return ABIArgInfo::getExpand(); + } + + return ABIArgInfo::getIndirect(0); + } else { + return ABIArgInfo::getDirect(); + } +} + +llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const { + const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); + + CGBuilderTy &Builder = CGF.Builder; + llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, + "ap"); + llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); + llvm::Type *PTy = + llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); + llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); + + uint64_t Offset = + llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); + llvm::Value *NextAddr = + Builder.CreateGEP(Addr, + llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset), + "ap.next"); + Builder.CreateStore(NextAddr, VAListAddrAsBPP); + + return AddrTyped; +} + +namespace { +/// X86_64ABIInfo - The X86_64 ABI information. 
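// --- Aside, not part of the commit: the i386 EmitVAArg above reduces to a
// pointer bump. A standalone C++ model of that computation, assuming each
// va_arg slot is the type's size rounded up to 4 bytes (the helper name is
// made up for illustration):
void *va_arg_i386_sketch(char **ap, unsigned type_size) {
  char *addr = *ap;                       // "ap.cur"
  unsigned slot = (type_size + 3u) & ~3u; // RoundUpToAlignment(size, 4)
  *ap = addr + slot;                      // store back "ap.next"
  return addr;                            // caller casts to T*
}
// --- End aside.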
+class X86_64ABIInfo : public ABIInfo { + enum Class { + Integer = 0, + SSE, + SSEUp, + X87, + X87Up, + ComplexX87, + NoClass, + Memory + }; + + /// merge - Implement the X86_64 ABI merging algorithm. + /// + /// Merge an accumulating classification \arg Accum with a field + /// classification \arg Field. + /// + /// \param Accum - The accumulating classification. This should + /// always be either NoClass or the result of a previous merge + /// call. In addition, this should never be Memory (the caller + /// should just return Memory for the aggregate). + Class merge(Class Accum, Class Field) const; + + /// classify - Determine the x86_64 register classes in which the + /// given type T should be passed. + /// + /// \param Lo - The classification for the parts of the type + /// residing in the low word of the containing object. + /// + /// \param Hi - The classification for the parts of the type + /// residing in the high word of the containing object. + /// + /// \param OffsetBase - The bit offset of this type in the + /// containing object. Some parameters are classified different + /// depending on whether they straddle an eightbyte boundary. + /// + /// If a word is unused its result will be NoClass; if a type should + /// be passed in Memory then at least the classification of \arg Lo + /// will be Memory. + /// + /// The \arg Lo class will be NoClass iff the argument is ignored. + /// + /// If the \arg Lo class is ComplexX87, then the \arg Hi class will + /// also be ComplexX87. + void classify(QualType T, ASTContext &Context, uint64_t OffsetBase, + Class &Lo, Class &Hi) const; + + /// getCoerceResult - Given a source type \arg Ty and an LLVM type + /// to coerce to, chose the best way to pass Ty in the same place + /// that \arg CoerceTo would be passed, but while keeping the + /// emitted code as simple as possible. + /// + /// FIXME: Note, this should be cleaned up to just take an enumeration of all + /// the ways we might want to pass things, instead of constructing an LLVM + /// type. This makes this code more explicit, and it makes it clearer that we + /// are also doing this for correctness in the case of passing scalar types. + ABIArgInfo getCoerceResult(QualType Ty, + const llvm::Type *CoerceTo, + ASTContext &Context) const; + + /// getIndirectResult - Give a source type \arg Ty, return a suitable result + /// such that the argument will be passed in memory. + ABIArgInfo getIndirectResult(QualType Ty, + ASTContext &Context) const; + + ABIArgInfo classifyReturnType(QualType RetTy, + ASTContext &Context) const; + + ABIArgInfo classifyArgumentType(QualType Ty, + ASTContext &Context, + unsigned &neededInt, + unsigned &neededSSE) const; + +public: + virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const; + + virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const; +}; +} + +X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, + Class Field) const { + // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is + // classified recursively so that always two fields are + // considered. The resulting class is calculated according to + // the classes of the fields in the eightbyte: + // + // (a) If both classes are equal, this is the resulting class. + // + // (b) If one of the classes is NO_CLASS, the resulting class is + // the other class. + // + // (c) If one of the classes is MEMORY, the result is the MEMORY + // class. + // + // (d) If one of the classes is INTEGER, the result is the + // INTEGER. 
+ // + // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, + // MEMORY is used as class. + // + // (f) Otherwise class SSE is used. + + // Accum should never be memory (we should have returned) or + // ComplexX87 (because this cannot be passed in a structure). + assert((Accum != Memory && Accum != ComplexX87) && + "Invalid accumulated classification during merge."); + if (Accum == Field || Field == NoClass) + return Accum; + else if (Field == Memory) + return Memory; + else if (Accum == NoClass) + return Field; + else if (Accum == Integer || Field == Integer) + return Integer; + else if (Field == X87 || Field == X87Up || Field == ComplexX87 || + Accum == X87 || Accum == X87Up) + return Memory; + else + return SSE; +} + +void X86_64ABIInfo::classify(QualType Ty, + ASTContext &Context, + uint64_t OffsetBase, + Class &Lo, Class &Hi) const { + // FIXME: This code can be simplified by introducing a simple value class for + // Class pairs with appropriate constructor methods for the various + // situations. + + // FIXME: Some of the split computations are wrong; unaligned vectors + // shouldn't be passed in registers for example, so there is no chance they + // can straddle an eightbyte. Verify & simplify. + + Lo = Hi = NoClass; + + Class &Current = OffsetBase < 64 ? Lo : Hi; + Current = Memory; + + if (const BuiltinType *BT = Ty->getAsBuiltinType()) { + BuiltinType::Kind k = BT->getKind(); + + if (k == BuiltinType::Void) { + Current = NoClass; + } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { + Lo = Integer; + Hi = Integer; + } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { + Current = Integer; + } else if (k == BuiltinType::Float || k == BuiltinType::Double) { + Current = SSE; + } else if (k == BuiltinType::LongDouble) { + Lo = X87; + Hi = X87Up; + } + // FIXME: _Decimal32 and _Decimal64 are SSE. + // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). + } else if (const EnumType *ET = Ty->getAsEnumType()) { + // Classify the underlying integer type. + classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi); + } else if (Ty->hasPointerRepresentation()) { + Current = Integer; + } else if (const VectorType *VT = Ty->getAsVectorType()) { + uint64_t Size = Context.getTypeSize(VT); + if (Size == 32) { + // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x + // float> as integer. + Current = Integer; + + // If this type crosses an eightbyte boundary, it should be + // split. + uint64_t EB_Real = (OffsetBase) / 64; + uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; + if (EB_Real != EB_Imag) + Hi = Lo; + } else if (Size == 64) { + // gcc passes <1 x double> in memory. :( + if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) + return; + + // gcc passes <1 x long long> as INTEGER. + if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong)) + Current = Integer; + else + Current = SSE; + + // If this type crosses an eightbyte boundary, it should be + // split. 
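// --- Aside, not part of the commit: hand-derived merge() results under
// rules (a)-(f) above, plus whole-type classifications from classify():
//   merge(NoClass, Integer) == Integer   (rule b)
//   merge(SSE, Integer)     == Integer   (rule d)
//   merge(SSE, X87)         == Memory    (rule e)
//   merge(SSE, SSE)         == SSE       (rule a)
struct TwoInts   { int a, b; };      // 8 bytes:  Lo = Integer, Hi = NoClass
struct IntDouble { int i; double d; }; // 16 bytes: Lo = Integer, Hi = SSE
typedef double v1d __attribute__((vector_size(8))); // <1 x double>: memory (gcc quirk above)
// --- End aside.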
+ if (OffsetBase && OffsetBase != 64) + Hi = Lo; + } else if (Size == 128) { + Lo = SSE; + Hi = SSEUp; + } + } else if (const ComplexType *CT = Ty->getAsComplexType()) { + QualType ET = Context.getCanonicalType(CT->getElementType()); + + uint64_t Size = Context.getTypeSize(Ty); + if (ET->isIntegralType()) { + if (Size <= 64) + Current = Integer; + else if (Size <= 128) + Lo = Hi = Integer; + } else if (ET == Context.FloatTy) + Current = SSE; + else if (ET == Context.DoubleTy) + Lo = Hi = SSE; + else if (ET == Context.LongDoubleTy) + Current = ComplexX87; + + // If this complex type crosses an eightbyte boundary then it + // should be split. + uint64_t EB_Real = (OffsetBase) / 64; + uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64; + if (Hi == NoClass && EB_Real != EB_Imag) + Hi = Lo; + } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { + // Arrays are treated like structures. + + uint64_t Size = Context.getTypeSize(Ty); + + // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger + // than two eightbytes, ..., it has class MEMORY. + if (Size > 128) + return; + + // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned + // fields, it has class MEMORY. + // + // Only need to check alignment of array base. + if (OffsetBase % Context.getTypeAlign(AT->getElementType())) + return; + + // Otherwise implement simplified merge. We could be smarter about + // this, but it isn't worth it and would be harder to verify. + Current = NoClass; + uint64_t EltSize = Context.getTypeSize(AT->getElementType()); + uint64_t ArraySize = AT->getSize().getZExtValue(); + for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { + Class FieldLo, FieldHi; + classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi); + Lo = merge(Lo, FieldLo); + Hi = merge(Hi, FieldHi); + if (Lo == Memory || Hi == Memory) + break; + } + + // Do post merger cleanup (see below). Only case we worry about is Memory. + if (Hi == Memory) + Lo = Memory; + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); + } else if (const RecordType *RT = Ty->getAsRecordType()) { + uint64_t Size = Context.getTypeSize(Ty); + + // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger + // than two eightbytes, ..., it has class MEMORY. + if (Size > 128) + return; + + const RecordDecl *RD = RT->getDecl(); + + // Assume variable sized types are passed in memory. + if (RD->hasFlexibleArrayMember()) + return; + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + // Reset Lo class, this will be recomputed. + Current = NoClass; + unsigned idx = 0; + for (RecordDecl::field_iterator i = RD->field_begin(Context), + e = RD->field_end(Context); i != e; ++i, ++idx) { + uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); + bool BitField = i->isBitField(); + + // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned + // fields, it has class MEMORY. + // + // Note, skip this test for bit-fields, see below. + if (!BitField && Offset % Context.getTypeAlign(i->getType())) { + Lo = Memory; + return; + } + + // Classify this field. + // + // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate + // exceeds a single eightbyte, each is classified + // separately. Each eightbyte gets initialized to class + // NO_CLASS. + Class FieldLo, FieldHi; + + // Bit-fields require special handling, they do not force the + // structure to be passed in memory even if unaligned, and + // therefore they can straddle an eightbyte. 
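// --- Aside, not part of the commit: hand-derived classifications
// following the complex and array branches above.
int  arr4[4];  // 16 bytes: Lo = Integer, Hi = Integer
char big[17];  // > 16 bytes: Rule 1, class MEMORY (passed on the stack)
// _Complex double      -> 16 bytes: Lo = SSE, Hi = SSE
// _Complex long double -> Lo = ComplexX87; the eightbyte-straddle check
//                         above copies the class to Hi as well
// --- End aside.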
+ if (BitField) { + // Ignore padding bit-fields. + if (i->isUnnamedBitfield()) + continue; + + uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); + uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue(); + + uint64_t EB_Lo = Offset / 64; + uint64_t EB_Hi = (Offset + Size - 1) / 64; + FieldLo = FieldHi = NoClass; + if (EB_Lo) { + assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); + FieldLo = NoClass; + FieldHi = Integer; + } else { + FieldLo = Integer; + FieldHi = EB_Hi ? Integer : NoClass; + } + } else + classify(i->getType(), Context, Offset, FieldLo, FieldHi); + Lo = merge(Lo, FieldLo); + Hi = merge(Hi, FieldHi); + if (Lo == Memory || Hi == Memory) + break; + } + + // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: + // + // (a) If one of the classes is MEMORY, the whole argument is + // passed in memory. + // + // (b) If SSEUP is not preceeded by SSE, it is converted to SSE. + + // The first of these conditions is guaranteed by how we implement + // the merge (just bail). + // + // The second condition occurs in the case of unions; for example + // union { _Complex double; unsigned; }. + if (Hi == Memory) + Lo = Memory; + if (Hi == SSEUp && Lo != SSE) + Hi = SSE; + } +} + +ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty, + const llvm::Type *CoerceTo, + ASTContext &Context) const { + if (CoerceTo == llvm::Type::Int64Ty) { + // Integer and pointer types will end up in a general purpose + // register. + if (Ty->isIntegralType() || Ty->isPointerType()) + return ABIArgInfo::getDirect(); + + } else if (CoerceTo == llvm::Type::DoubleTy) { + // FIXME: It would probably be better to make CGFunctionInfo only map using + // canonical types than to canonize here. + QualType CTy = Context.getCanonicalType(Ty); + + // Float and double end up in a single SSE reg. + if (CTy == Context.FloatTy || CTy == Context.DoubleTy) + return ABIArgInfo::getDirect(); + + } + + return ABIArgInfo::getCoerce(CoerceTo); +} + +ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, + ASTContext &Context) const { + // If this is a scalar LLVM value then assume LLVM will pass it in the right + // place naturally. + if (!CodeGenFunction::hasAggregateLLVMType(Ty)) + return ABIArgInfo::getDirect(); + + // FIXME: Set alignment correctly. + return ABIArgInfo::getIndirect(0); +} + +ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, + ASTContext &Context) const { + // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the + // classification algorithm. + X86_64ABIInfo::Class Lo, Hi; + classify(RetTy, Context, 0, Lo, Hi); + + // Check some invariants. + assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); + assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification."); + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + + const llvm::Type *ResType = 0; + switch (Lo) { + case NoClass: + return ABIArgInfo::getIgnore(); + + case SSEUp: + case X87Up: + assert(0 && "Invalid classification for lo word."); + + // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via + // hidden argument. + case Memory: + return getIndirectResult(RetTy, Context); + + // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next + // available register of the sequence %rax, %rdx is used. + case Integer: + ResType = llvm::Type::Int64Ty; break; + + // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next + // available SSE register of the sequence %xmm0, %xmm1 is used. 
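// --- Aside, not part of the commit: getCoerceResult above skips pointless
// coercions when the value already fits the target class. Hand-derived
// outcomes:
long   l1;                   // Integer class, CoerceTo i64, integral -> getDirect()
int   *p1;                   // pointer, CoerceTo i64                 -> getDirect()
double d1;                   // CoerceTo double, already double       -> getDirect()
struct PairII { int a, b; }; // aggregate, CoerceTo i64               -> getCoerce(i64)
// --- End aside.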
+ case SSE: + ResType = llvm::Type::DoubleTy; break; + + // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is + // returned on the X87 stack in %st0 as 80-bit x87 number. + case X87: + ResType = llvm::Type::X86_FP80Ty; break; + + // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real + // part of the value is returned in %st0 and the imaginary part in + // %st1. + case ComplexX87: + assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); + ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty, + llvm::Type::X86_FP80Ty, + NULL); + break; + } + + switch (Hi) { + // Memory was handled previously and X87 should + // never occur as a hi class. + case Memory: + case X87: + assert(0 && "Invalid classification for hi word."); + + case ComplexX87: // Previously handled. + case NoClass: break; + + case Integer: + ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL); + break; + case SSE: + ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL); + break; + + // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte + // is passed in the upper half of the last used SSE register. + // + // SSEUP should always be preceeded by SSE, just widen. + case SSEUp: + assert(Lo == SSE && "Unexpected SSEUp classification."); + ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2); + break; + + // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is + // returned together with the previous X87 value in %st0. + case X87Up: + // If X87Up is preceeded by X87, we don't need to do + // anything. However, in some cases with unions it may not be + // preceeded by X87. In such situations we follow gcc and pass the + // extra bits in an SSE reg. + if (Lo != X87) + ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL); + break; + } + + return getCoerceResult(RetTy, ResType, Context); +} + +ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, + unsigned &neededInt, + unsigned &neededSSE) const { + X86_64ABIInfo::Class Lo, Hi; + classify(Ty, Context, 0, Lo, Hi); + + // Check some invariants. + // FIXME: Enforce these by construction. + assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); + assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification."); + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + + neededInt = 0; + neededSSE = 0; + const llvm::Type *ResType = 0; + switch (Lo) { + case NoClass: + return ABIArgInfo::getIgnore(); + + // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument + // on the stack. + case Memory: + + // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or + // COMPLEX_X87, it is passed in memory. + case X87: + case ComplexX87: + return getIndirectResult(Ty, Context); + + case SSEUp: + case X87Up: + assert(0 && "Invalid classification for lo word."); + + // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next + // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 + // and %r9 is used. + case Integer: + ++neededInt; + ResType = llvm::Type::Int64Ty; + break; + + // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next + // available SSE register is used, the registers are taken in the + // order from %xmm0 to %xmm7. + case SSE: + ++neededSSE; + ResType = llvm::Type::DoubleTy; + break; + } + + switch (Hi) { + // Memory was handled previously, ComplexX87 and X87 should + // never occur as hi classes, and X87Up must be preceed by X87, + // which is passed in memory. 
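// --- Aside, not part of the commit: register demand per argument,
// hand-derived from the Lo/Hi switches above.
struct II2 { int a, b; };        // Lo=Integer          -> neededInt=1, neededSSE=0
struct ID2 { int a; double d; }; // Lo=Integer, Hi=SSE  -> neededInt=1, neededSSE=1
struct DD2 { double a, b; };     // Lo=SSE, Hi=SSE      -> neededInt=0, neededSSE=2
// long double: class X87 -> passed in memory, no registers consumed
// --- End aside.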
+ case Memory: + case X87: + case ComplexX87: + assert(0 && "Invalid classification for hi word."); + break; + + case NoClass: break; + case Integer: + ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL); + ++neededInt; + break; + + // X87Up generally doesn't occur here (long double is passed in + // memory), except in situations involving unions. + case X87Up: + case SSE: + ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL); + ++neededSSE; + break; + + // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the + // eightbyte is passed in the upper half of the last used SSE + // register. + case SSEUp: + assert(Lo == SSE && "Unexpected SSEUp classification."); + ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2); + break; + } + + return getCoerceResult(Ty, ResType, Context); +} + +void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); + + // Keep track of the number of assigned registers. + unsigned freeIntRegs = 6, freeSSERegs = 8; + + // If the return value is indirect, then the hidden argument is consuming one + // integer register. + if (FI.getReturnInfo().isIndirect()) + --freeIntRegs; + + // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers + // get assigned (in left-to-right order) for passing as follows... + for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it) { + unsigned neededInt, neededSSE; + it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE); + + // AMD64-ABI 3.2.3p3: If there are no registers available for any + // eightbyte of an argument, the whole argument is passed on the + // stack. If registers have already been assigned for some + // eightbytes of such an argument, the assignments get reverted. + if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { + freeIntRegs -= neededInt; + freeSSERegs -= neededSSE; + } else { + it->info = getIndirectResult(it->type, Context); + } + } +} + +static l |
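// --- Aside, not part of the commit: computeInfo's accounting in action.
// With the SysV counts above (6 integer registers, 8 SSE), a hypothetical
// call like the one below exhausts %rdi, %rsi, %rdx, %rcx, %r8, %r9 on
// a..f; for g, freeIntRegs is 0, so computeInfo falls back to
// getIndirectResult, which for a scalar stays Direct and lets the backend
// place it on the stack.
void seven_ints(int a, int b, int c, int d, int e, int f, int g);
// --- End aside.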