Diffstat (limited to 'lib/CodeGen/CGCall.cpp')
-rw-r--r--  lib/CodeGen/CGCall.cpp | 124
1 file changed, 55 insertions, 69 deletions
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 6aabd64f9b..b6ec67d83c 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -1278,7 +1278,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect: {
llvm::Value *V = AI;
- if (hasAggregateLLVMType(Ty)) {
+ if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
// need to do is realign the value, if requested
if (ArgI.getIndirectRealign()) {
@@ -1411,7 +1411,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Match to what EmitParmDecl is expecting for this type.
- if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
@@ -1440,7 +1440,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Ignore:
// Initialize the local variable appropriately.
- if (hasAggregateLLVMType(Ty))
+ if (!hasScalarEvaluationKind(Ty))
EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
else
EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
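Every hunk above makes the same substitution: the old CodeGenFunction::hasAggregateLLVMType(T) test becomes a query on the type's evaluation kind. A minimal sketch of how those helpers fit together, taking as given only the names that actually appear in the hunks (the exact declarations are an assumption):

// Sketch only, assuming the helpers referenced by the hunks above.
enum TypeEvaluationKind { TEK_Scalar, TEK_Complex, TEK_Aggregate };

// Classifies how an rvalue of type T is represented during IR generation.
static TypeEvaluationKind getEvaluationKind(QualType T);

static bool hasScalarEvaluationKind(QualType T) {
  return getEvaluationKind(T) == TEK_Scalar;
}
static bool hasAggregateEvaluationKind(QualType T) {
  return getEvaluationKind(T) == TEK_Aggregate;
}

// The old hasAggregateLLVMType(T) meant "not scalar", so it maps to
// !hasScalarEvaluationKind(T); complex types now take their own TEK_Complex
// paths instead of riding along with aggregates.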
@@ -1664,15 +1664,23 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
switch (RetAI.getKind()) {
case ABIArgInfo::Indirect: {
- unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
- if (RetTy->isAnyComplexType()) {
- ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
- StoreComplexToAddr(RT, CurFn->arg_begin(), false);
- } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ switch (getEvaluationKind(RetTy)) {
+ case TEK_Complex: {
+ ComplexPairTy RT =
+ EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy));
+ EmitStoreOfComplex(RT,
+ MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
+ /*isInit*/ true);
+ break;
+ }
+ case TEK_Aggregate:
// Do nothing; aggregates get evaluated directly into the destination.
- } else {
- EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
- false, Alignment, RetTy);
+ break;
+ case TEK_Scalar:
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
+ MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
+ /*isInit*/ true);
+ break;
}
break;
}
@@ -1749,10 +1757,10 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
// For the most part, we just need to load the alloca, except:
// 1) aggregate r-values are actually pointers to temporaries, and
- // 2) references to aggregates are pointers directly to the aggregate.
- // I don't know why references to non-aggregates are different here.
+ // 2) references to non-scalars are pointers directly to the aggregate.
+ // I don't know why references to scalars are different here.
if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
- if (hasAggregateLLVMType(ref->getPointeeType()))
+ if (!hasScalarEvaluationKind(ref->getPointeeType()))
return args.add(RValue::getAggregate(local), type);
// Locals which are references to scalars are represented
@@ -1760,17 +1768,7 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
return args.add(RValue::get(Builder.CreateLoad(local)), type);
}
- if (type->isAnyComplexType()) {
- ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
- return args.add(RValue::getComplex(complex), type);
- }
-
- if (hasAggregateLLVMType(type))
- return args.add(RValue::getAggregate(local), type);
-
- unsigned alignment = getContext().getDeclAlign(param).getQuantity();
- llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
- return args.add(RValue::get(value), type);
+ args.add(convertTempToRValue(local, type), type);
}
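Several hunks in this patch collapse the old complex/aggregate/scalar load sequence into a single convertTempToRValue call. The shape that helper presumably has, reconstructed only from the dispatch pattern visible in this diff (the real body lives elsewhere in the patch and may differ in detail, e.g. which EmitLoadOfScalar overload is used):

// Sketch: load a temporary of the given type as an RValue, dispatching on
// its evaluation kind. The LValue-taking EmitLoadOfScalar overload is assumed.
RValue CodeGenFunction::convertTempToRValue(llvm::Value *addr, QualType type) {
  LValue lvalue = MakeNaturalAlignAddrLValue(addr, type);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue));
  }
  llvm_unreachable("bad evaluation kind");
}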
static bool isProvablyNull(llvm::Value *addr) {
@@ -1935,7 +1933,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
type);
}
- if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
+ if (hasAggregateEvaluationKind(type) &&
isa<ImplicitCastExpr>(E) &&
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
@@ -2079,15 +2077,7 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
llvm::Value *Addr = RV.getAggregateAddr();
for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
- LValue LV = MakeAddrLValue(EltAddr, EltTy);
- RValue EltRV;
- if (EltTy->isAnyComplexType())
- // FIXME: Volatile?
- EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
- else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
- EltRV = LV.asAggregateRValue();
- else
- EltRV = EmitLoadOfLValue(LV);
+ RValue EltRV = convertTempToRValue(EltAddr, EltTy);
ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
}
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -2180,8 +2170,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->RV;
- unsigned TypeAlign =
- getContext().getTypeAlignInChars(I->Ty).getQuantity();
+ CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
// Insert a padding argument to ensure proper alignment.
if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
@@ -2197,12 +2186,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (ArgInfo.getIndirectAlign() > AI->getAlignment())
AI->setAlignment(ArgInfo.getIndirectAlign());
Args.push_back(AI);
+
+ LValue argLV =
+ MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
if (RV.isScalar())
- EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
- TypeAlign, I->Ty);
+ EmitStoreOfScalar(RV.getScalarVal(), argLV, /*init*/ true);
else
- StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+ EmitStoreOfComplex(RV.getComplexVal(), argLV, /*init*/ true);
// Validate argument match.
checkArgMatches(AI, IRArgNo, IRFuncTy);
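The argument-store hunks also change which overloads are used: instead of passing a raw address plus a separately tracked unsigned alignment, the code builds an LValue that carries the address, type, and CharUnits alignment, and stores through it with isInit set because the temporary was just created. In outline (assembled from the hunk above, with the removed form kept as a comment for comparison):

// Old form: address, volatility, alignment, and type passed separately.
//   EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, TypeAlign, I->Ty);
//   StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
// New form: one LValue bundles the address, type, and CharUnits alignment.
CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
if (RV.isScalar())
  EmitStoreOfScalar(RV.getScalarVal(), argLV, /*isInit*/ true);
else
  EmitStoreOfComplex(RV.getComplexVal(), argLV, /*isInit*/ true);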
@@ -2217,7 +2208,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
- (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
+ (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
// Create an aligned temporary, and copy to it.
llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
@@ -2266,12 +2257,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// FIXME: Avoid the conversion through memory if possible.
llvm::Value *SrcPtr;
- if (RV.isScalar()) {
- SrcPtr = CreateMemTemp(I->Ty, "coerce");
- EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
- } else if (RV.isComplex()) {
+ if (RV.isScalar() || RV.isComplex()) {
SrcPtr = CreateMemTemp(I->Ty, "coerce");
- StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
+ LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
+ if (RV.isScalar()) {
+ EmitStoreOfScalar(RV.getScalarVal(), SrcLV, /*init*/ true);
+ } else {
+ EmitStoreOfComplex(RV.getComplexVal(), SrcLV, /*init*/ true);
+ }
} else
SrcPtr = RV.getAggregateAddr();
@@ -2424,14 +2417,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
emitWritebacks(*this, CallArgs);
switch (RetAI.getKind()) {
- case ABIArgInfo::Indirect: {
- unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
- if (RetTy->isAnyComplexType())
- return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
- if (CodeGenFunction::hasAggregateLLVMType(RetTy))
- return RValue::getAggregate(Args[0]);
- return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
- }
+ case ABIArgInfo::Indirect:
+ return convertTempToRValue(Args[0], RetTy);
case ABIArgInfo::Ignore:
// If we are ignoring an argument that had a result, make sure to
@@ -2442,12 +2429,13 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Direct: {
llvm::Type *RetIRTy = ConvertType(RetTy);
if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
- if (RetTy->isAnyComplexType()) {
+ switch (getEvaluationKind(RetTy)) {
+ case TEK_Complex: {
llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
return RValue::getComplex(std::make_pair(Real, Imag));
}
- if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ case TEK_Aggregate: {
llvm::Value *DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
@@ -2458,13 +2446,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
return RValue::getAggregate(DestPtr);
}
-
- // If the argument doesn't match, perform a bitcast to coerce it. This
- // can happen due to trivial type mismatches.
- llvm::Value *V = CI;
- if (V->getType() != RetIRTy)
- V = Builder.CreateBitCast(V, RetIRTy);
- return RValue::get(V);
+ case TEK_Scalar: {
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
}
llvm::Value *DestPtr = ReturnValue.getValue();
@@ -2485,12 +2476,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
- unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
- if (RetTy->isAnyComplexType())
- return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
- if (CodeGenFunction::hasAggregateLLVMType(RetTy))
- return RValue::getAggregate(DestPtr);
- return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
+ return convertTempToRValue(DestPtr, RetTy);
}
case ABIArgInfo::Expand: