author     Manman Ren <mren@apple.com>    2012-10-16 19:01:37 +0000
committer  Manman Ren <mren@apple.com>    2012-10-16 19:01:37 +0000
commit     d105e73368e677e65af724947be85ec87a0fa45e (patch)
tree       64ffbfbde10a539b068c0b61560172eedeff434f /lib/CodeGen
parent     9b29f4fe3d0600edf6ba00d48f2d4f2b1984f247 (diff)
ARM ABI: fix ABI alignment issues when passing legal vector types as varargs.
We create an aligned temporary space and copy the content over from ap.cur to
the temporary space. This is necessary if the natural alignment of the type is
greater than the ABI alignment.

rdar://12439123

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@166040 91177308-0d34-0410-b5e6-96231b3b80d8
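For context, here is a minimal sketch (not part of this commit) of the kind of C source that hits this path: a 128-bit vector passed through varargs on ARM, where the type's natural 16-byte alignment is greater than the 4- or 8-byte ABI alignment of the va_arg slot, so the lowered va_arg must copy the value into an aligned temporary. The type name int32x4 and the function take_last are hypothetical.

#include <stdarg.h>

/* Hypothetical 128-bit vector type: natural alignment is 16 bytes, but the
 * ARM ABI aligns the va_arg slot to at most 8 bytes (AAPCS) or 4 (APCS). */
typedef int int32x4 __attribute__((vector_size(16)));

int32x4 take_last(int count, ...) {
  va_list ap;
  va_start(ap, count);
  int32x4 v = {0, 0, 0, 0};
  for (int i = 0; i < count; ++i)
    v = va_arg(ap, int32x4);  /* lowered by ARMABIInfo::EmitVAArg */
  va_end(ap);
  return v;
}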
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/TargetInfo.cpp  44
1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 75cc5a37a5..b43d33198f 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -3169,27 +3169,59 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
   CGBuilderTy &Builder = CGF.Builder;
   llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
   llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
-  // Handle address alignment for type alignment > 32 bits
+
+  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
   uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+
+  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
+  if (Ty->getAs<VectorType>()) {
+    if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
+        getABIKind() == ARMABIInfo::AAPCS)
+      TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
+    else
+      TyAlign = 4;
+  }
+
+  // Handle address alignment for ABI alignment > 4 bytes.
   if (TyAlign > 4) {
     assert((TyAlign & (TyAlign - 1)) == 0 &&
            "Alignment is not power of 2!");
     llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
     AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
     AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
-    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
   }
-  llvm::Type *PTy =
-    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
-  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
   uint64_t Offset =
-    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+    llvm::RoundUpToAlignment(Size, 4);
   llvm::Value *NextAddr =
     Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                       "ap.next");
   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+  if (Ty->getAs<VectorType>() &&
+      (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8)) {
+    // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
+    // may not be correctly aligned for the vector type. We create an aligned
+    // temporary space and copy the content over from ap.cur to the temporary
+    // space. This is necessary if the natural alignment of the type is greater
+    // than the ABI alignment.
+    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
+    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
+                                                    "var.align");
+    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
+    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
+    Builder.CreateMemCpy(Dst, Src,
+        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
+        TyAlign, false);
+    Addr = AlignedTemp; //The content is in aligned location.
+  }
+
+  llvm::Type *PTy =
+    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
   return AddrTyped;
 }
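The aligned-address computation in the hunk above (the CreateAdd/CreateAnd pair) is the standard power-of-two round-up idiom. As a standalone sketch in C, with a hypothetical align_up helper standing in for the emitted IR:

#include <assert.h>
#include <stdint.h>

/* Round addr up to the next multiple of align (a power of two), mirroring the
 * add-then-mask sequence emitted before reading the argument from ap.cur. */
static uint32_t align_up(uint32_t addr, uint32_t align) {
  assert((align & (align - 1)) == 0 && "Alignment is not power of 2!");
  return (addr + align - 1) & ~(align - 1);
}

/* For example, align_up(0x1004, 8) == 0x1008 and align_up(0x1008, 8) == 0x1008. */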