author    | Mike Stump <mrs@apple.com> | 2009-09-09 15:08:12 +0000
committer | Mike Stump <mrs@apple.com> | 2009-09-09 15:08:12 +0000
commit    | 1eb4433ac451dc16f4133a88af2d002ac26c58ef (patch)
tree      | 07065b80cb7787bb7b9ffcb985196007a57e86f7 /lib/CodeGen/CGCall.cpp
parent    | 79d39f92590cf2e91bf81486b02cd1156d13ca54 (diff)
Remove tabs, and whitespace cleanups.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@81346 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/CodeGen/CGCall.cpp')
-rw-r--r-- | lib/CodeGen/CGCall.cpp | 146
1 file changed, 73 insertions(+), 73 deletions(-)
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 0a187fca76..2da16a15ac 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -33,13 +33,13 @@ using namespace CodeGen;
 
 // FIXME: Use iterator and sidestep silly type array creation.
-const
+const
 CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
-  return getFunctionInfo(FTNP->getResultType(),
+  return getFunctionInfo(FTNP->getResultType(),
                          llvm::SmallVector<QualType, 16>());
 }
 
-const
+const
 CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
   llvm::SmallVector<QualType, 16> ArgTys;
   // FIXME: Kill copy.
@@ -53,7 +53,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
   // Add the 'this' pointer unless this is a static method.
   if (MD->isInstance())
     ArgTys.push_back(MD->getThisType(Context));
-
+
   const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
   for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
     ArgTys.push_back(FTP->getArgType(i));
@@ -64,7 +64,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
     if (MD->isInstance())
       return getFunctionInfo(MD);
-
+
   const FunctionType *FTy = FD->getType()->getAsFunctionType();
   if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
     return getFunctionInfo(FTP);
@@ -82,21 +82,21 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
   return getFunctionInfo(MD->getResultType(), ArgTys);
 }
 
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                     const CallArgList &Args) {
   // FIXME: Kill copy.
   llvm::SmallVector<QualType, 16> ArgTys;
-  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
        i != e; ++i)
     ArgTys.push_back(i->second);
   return getFunctionInfo(ResTy, ArgTys);
 }
 
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                     const FunctionArgList &Args) {
   // FIXME: Kill copy.
   llvm::SmallVector<QualType, 16> ArgTys;
-  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
     ArgTys.push_back(i->second);
   return getFunctionInfo(ResTy, ArgTys);
@@ -123,7 +123,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
   return *FI;
 }
 
-CGFunctionInfo::CGFunctionInfo(QualType ResTy,
+CGFunctionInfo::CGFunctionInfo(QualType ResTy,
                                const llvm::SmallVector<QualType, 16> &ArgTys) {
   NumArgs = ArgTys.size();
   Args = new ArgInfo[1 + NumArgs];
@@ -134,20 +134,20 @@ CGFunctionInfo::CGFunctionInfo(QualType ResTy,
 
 /***/
 
-void CodeGenTypes::GetExpandedTypes(QualType Ty,
+void CodeGenTypes::GetExpandedTypes(QualType Ty,
                                     std::vector<const llvm::Type*> &ArgTys) {
   const RecordType *RT = Ty->getAsStructureType();
   assert(RT && "Can only expand structure types.");
   const RecordDecl *RD = RT->getDecl();
-  assert(!RD->hasFlexibleArrayMember() &&
+  assert(!RD->hasFlexibleArrayMember() &&
          "Cannot expand structure with flexible array.");
-
+
   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
        i != e; ++i) {
     const FieldDecl *FD = *i;
-    assert(!FD->isBitField() &&
+    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");
-
+
     QualType FT = FD->getType();
     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
       GetExpandedTypes(FT, ArgTys);
@@ -157,19 +157,19 @@ void CodeGenTypes::GetExpandedTypes(QualType Ty,
   }
 }
 
-llvm::Function::arg_iterator
+llvm::Function::arg_iterator
 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                     llvm::Function::arg_iterator AI) {
   const RecordType *RT = Ty->getAsStructureType();
   assert(RT && "Can only expand structure types.");
   RecordDecl *RD = RT->getDecl();
-  assert(LV.isSimple() &&
-         "Unexpected non-simple lvalue during struct expansion.");
+  assert(LV.isSimple() &&
+         "Unexpected non-simple lvalue during struct expansion.");
   llvm::Value *Addr = LV.getAddress();
   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
        i != e; ++i) {
-    FieldDecl *FD = *i;
+    FieldDecl *FD = *i;
     QualType FT = FD->getType();
 
     // FIXME: What are the right qualifiers here?
@@ -185,8 +185,8 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
   return AI;
 }
 
-void
-CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+void
+CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                   llvm::SmallVector<llvm::Value*, 16> &Args) {
   const RecordType *RT = Ty->getAsStructureType();
   assert(RT && "Can only expand structure types.");
@@ -196,16 +196,16 @@ CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
   llvm::Value *Addr = RV.getAggregateAddr();
   for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
        i != e; ++i) {
-    FieldDecl *FD = *i;
+    FieldDecl *FD = *i;
     QualType FT = FD->getType();
-
+
     // FIXME: What are the right qualifiers here?
     LValue LV = EmitLValueForField(Addr, FD, false, 0);
     if (CodeGenFunction::hasAggregateLLVMType(FT)) {
       ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
     } else {
       RValue RV = EmitLoadOfLValue(LV, FT);
-      assert(RV.isScalar() &&
+      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
       Args.push_back(RV.getScalarVal());
     }
@@ -221,7 +221,7 @@ CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                       const llvm::Type *Ty,
                                       CodeGenFunction &CGF) {
-  const llvm::Type *SrcTy =
+  const llvm::Type *SrcTy =
     cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
@@ -244,9 +244,9 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
   // Otherwise do coercion through memory. This is stupid, but
   // simple.
   llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
-  llvm::Value *Casted =
+  llvm::Value *Casted =
     CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
-  llvm::StoreInst *Store =
+  llvm::StoreInst *Store =
     CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
   // FIXME: Use better alignment / avoid requiring aligned store.
   Store->setAlignment(1);
@@ -263,7 +263,7 @@ static void CreateCoercedStore(llvm::Value *Src,
                                llvm::Value *DstPtr,
                                CodeGenFunction &CGF) {
   const llvm::Type *SrcTy = Src->getType();
-  const llvm::Type *DstTy =
+  const llvm::Type *DstTy =
     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
 
   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -287,7 +287,7 @@ static void CreateCoercedStore(llvm::Value *Src,
   // to that information.
   llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
   CGF.Builder.CreateStore(Src, Tmp);
-  llvm::Value *Casted =
+  llvm::Value *Casted =
     CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
   llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
   // FIXME: Use better alignment / avoid requiring aligned load.
@@ -335,11 +335,11 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
     ResultType = RetAI.getCoerceToType();
     break;
   }
-
-  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+
+  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
          ie = FI.arg_end(); it != ie; ++it) {
     const ABIArgInfo &AI = it->info;
-
+
     switch (AI.getKind()) {
     case ABIArgInfo::Ignore:
       break;
@@ -359,7 +359,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
     case ABIArgInfo::Direct:
       ArgTys.push_back(ConvertType(it->type));
       break;
-
+
     case ABIArgInfo::Expand:
       GetExpandedTypes(it->type, ArgTys);
       break;
@@ -414,7 +414,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
     break;
 
   case ABIArgInfo::Indirect:
-    PAL.push_back(llvm::AttributeWithIndex::get(Index,
+    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                 llvm::Attribute::StructRet |
                                                 llvm::Attribute::NoAlias));
     ++Index;
@@ -428,7 +428,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
     break;
 
   case ABIArgInfo::Expand:
-    assert(0 && "Invalid ABI kind for return argument");
+    assert(0 && "Invalid ABI kind for return argument");
   }
 
   if (RetAttrs)
@@ -439,12 +439,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
   // register variable.
   signed RegParm = 0;
   if (TargetDecl)
-    if (const RegparmAttr *RegParmAttr
+    if (const RegparmAttr *RegParmAttr
           = TargetDecl->getAttr<RegparmAttr>())
       RegParm = RegParmAttr->getNumParams();
 
   unsigned PointerWidth = getContext().Target.getPointerWidth(0);
-  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
          ie = FI.arg_end(); it != ie; ++it) {
     QualType ParamType = it->type;
     const ABIArgInfo &AI = it->info;
@@ -483,10 +483,10 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
 
     case ABIArgInfo::Ignore:
       // Skip increment, no matching LLVM parameter.
-      continue;
+      continue;
 
    case ABIArgInfo::Expand: {
-      std::vector<const llvm::Type*> Tys;
+      std::vector<const llvm::Type*> Tys;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
@@ -495,7 +495,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
      continue;
    }
    }
-
+
    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
@@ -525,13 +525,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
 
  // Emit allocs for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();
-
+
  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }
-
+
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
@@ -556,7 +556,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
-      EmitParmDecl(*Arg, V);
+      EmitParmDecl(*Arg, V);
      break;
    }
 
@@ -580,17 +580,17 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
      EmitParmDecl(*Arg, V);
      break;
    }
-
+
    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
-      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
+      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
-      llvm::Function::arg_iterator End =
-        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
+      llvm::Function::arg_iterator End =
+        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
      EmitParmDecl(*Arg, Temp);
 
      // Name the arguments used in expansion and increment AI.
@@ -602,14 +602,14 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
 
    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
-      if (hasAggregateLLVMType(Ty)) {
+      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }
-
+
      // Skip increment, no matching LLVM parameter.
-      continue;
+      continue;
 
    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
@@ -668,16 +668,16 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
 
    case ABIArgInfo::Ignore:
      break;
-
+
    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;
 
    case ABIArgInfo::Expand:
-      assert(0 && "Invalid ABI kind for return argument");
+      assert(0 && "Invalid ABI kind for return argument");
    }
  }
-
+
  if (RV) {
    Builder.CreateRet(RV);
  } else {
@@ -688,12 +688,12 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
 RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, ArgType);
-
+
  return EmitAnyExprToTemp(E);
 }
 
 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
-                                 llvm::Value *Callee,
+                                 llvm::Value *Callee,
                                  const CallArgList &CallArgs,
                                  const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
@@ -703,17 +703,17 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
-
-
+
+
  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result.
  if (CGM.ReturnTypeUsesSret(CallInfo))
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
-
+
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
-  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;
@@ -726,7 +726,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
-          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
@@ -745,7 +745,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;
-
+
    case ABIArgInfo::Ignore:
      break;
 
@@ -758,9 +758,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
-      } else
+      } else
        SrcPtr = RV.getAggregateAddr();
-      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }
@@ -770,7 +770,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
      break;
    }
  }
-
+
  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast. This handles some cases
  // with unprototyped functions.
@@ -780,7 +780,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
    const llvm::FunctionType *CurFT =
      cast<llvm::FunctionType>(CurPT->getElementType());
    const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
-
+
    if (CE->getOpcode() == llvm::Instruction::BitCast &&
        ActualFT->getReturnType() == CurFT->getReturnType() &&
        ActualFT->getNumParams() == CurFT->getNumParams() &&
@@ -791,7 +791,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
          ArgsMatch = false;
          break;
        }
-
+
    // Strip the cast if we can get away with it. This is a nice cleanup,
    // but also allows us to inline the function at -O0 if it is marked
    // always_inline.
@@ -799,20 +799,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
      Callee = CalleeF;
    }
  }
-
+
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());
-
+
  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
-    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
+    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }
 
@@ -828,15 +828,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
-
+
    // FIXME: For now, emit a dummy basic block because expr emitters in
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();
-
+
    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
-  }
+  }
 
  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() &&
@@ -882,7 +882,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
  }
 
  case ABIArgInfo::Expand:
-    assert(0 && "Invalid ABI kind for return argument");
+    assert(0 && "Invalid ABI kind for return argument");
  }
 
  assert(0 && "Unhandled ABIArgInfo::Kind");