diff options
author:    John McCall <rjmccall@apple.com>  2013-01-22 03:56:22 +0000
committer: John McCall <rjmccall@apple.com>  2013-01-22 03:56:22 +0000
commit:    b62faef5ec86c1931785ffa805ece9b491735894 (patch)
tree:      997ca498944f0ebbbff626dab34293cbda8577c1
parent:    614323cc1cd3af406ed697bed7324f76f871419e (diff)
Use the correct field to copy/dispose a __block variable.
We were previously hard-coding a particular field index. This was
fine before (because we were obviously guaranteed the presence
of a copy/dispose member) except for (1) alignment padding and
(2) future extensions adding extra members to the header, such
as the extended-layout pointer.
Note that we only introduce the extended-layout pointer in the
presence of structs. (We also seem to be introducing it even
in the presence of an all-non-object layout, but that's a
different potential issue.)
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@173122 91177308-0d34-0410-b5e6-96231b3b80d8
 lib/CodeGen/CGBlocks.cpp         | 55 ++++++++++++++++++++++++++++++----------
 test/CodeGenObjCXX/arc-blocks.mm | 49 ++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 14 deletions(-)
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp index b199e76325..2d51a154ad 100644 --- a/lib/CodeGen/CGBlocks.cpp +++ b/lib/CodeGen/CGBlocks.cpp @@ -1246,7 +1246,14 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, */ - +/// Generate the copy-helper function for a block closure object: +/// static void block_copy_helper(block_t *dst, block_t *src); +/// The runtime will have previously initialized 'dst' by doing a +/// bit-copy of 'src'. +/// +/// Note that this copies an entire block closure object to the heap; +/// it should not be confused with a 'byref copy helper', which moves +/// the contents of an individual __block variable to the heap. llvm::Constant * CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) { ASTContext &C = getContext(); @@ -1402,6 +1409,13 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) { return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); } +/// Generate the destroy-helper function for a block closure object: +/// static void block_destroy_helper(block_t *theBlock); +/// +/// Note that this destroys a heap-allocated block closure object; +/// it should not be confused with a 'byref destroy helper', which +/// destroys the heap-allocated contents of an individual __block +/// variable. 
llvm::Constant * CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) { ASTContext &C = getContext(); @@ -1687,6 +1701,7 @@ public: static llvm::Constant * generateByrefCopyHelper(CodeGenFunction &CGF, llvm::StructType &byrefType, + unsigned valueFieldIndex, CodeGenModule::ByrefHelpers &byrefInfo) { ASTContext &Context = CGF.getContext(); @@ -1735,13 +1750,13 @@ generateByrefCopyHelper(CodeGenFunction &CGF, llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst); destField = CGF.Builder.CreateLoad(destField); destField = CGF.Builder.CreateBitCast(destField, byrefPtrType); - destField = CGF.Builder.CreateStructGEP(destField, 6, "x"); + destField = CGF.Builder.CreateStructGEP(destField, valueFieldIndex, "x"); // src->x llvm::Value *srcField = CGF.GetAddrOfLocalVar(&src); srcField = CGF.Builder.CreateLoad(srcField); srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType); - srcField = CGF.Builder.CreateStructGEP(srcField, 6, "x"); + srcField = CGF.Builder.CreateStructGEP(srcField, valueFieldIndex, "x"); byrefInfo.emitCopy(CGF, destField, srcField); } @@ -1754,15 +1769,17 @@ generateByrefCopyHelper(CodeGenFunction &CGF, /// Build the copy helper for a __block variable. static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM, llvm::StructType &byrefType, + unsigned byrefValueIndex, CodeGenModule::ByrefHelpers &info) { CodeGenFunction CGF(CGM); - return generateByrefCopyHelper(CGF, byrefType, info); + return generateByrefCopyHelper(CGF, byrefType, byrefValueIndex, info); } /// Generate code for a __block variable's dispose helper. 
static llvm::Constant * generateByrefDisposeHelper(CodeGenFunction &CGF, llvm::StructType &byrefType, + unsigned byrefValueIndex, CodeGenModule::ByrefHelpers &byrefInfo) { ASTContext &Context = CGF.getContext(); QualType R = Context.VoidTy; @@ -1804,7 +1821,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF, llvm::Value *V = CGF.GetAddrOfLocalVar(&src); V = CGF.Builder.CreateLoad(V); V = CGF.Builder.CreateBitCast(V, byrefType.getPointerTo(0)); - V = CGF.Builder.CreateStructGEP(V, 6, "x"); + V = CGF.Builder.CreateStructGEP(V, byrefValueIndex, "x"); byrefInfo.emitDispose(CGF, V); } @@ -1817,14 +1834,17 @@ generateByrefDisposeHelper(CodeGenFunction &CGF, /// Build the dispose helper for a __block variable. static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM, llvm::StructType &byrefType, + unsigned byrefValueIndex, CodeGenModule::ByrefHelpers &info) { CodeGenFunction CGF(CGM); - return generateByrefDisposeHelper(CGF, byrefType, info); + return generateByrefDisposeHelper(CGF, byrefType, byrefValueIndex, info); } -/// +/// Lazily build the copy and dispose helpers for a __block variable +/// with the given information. template <class T> static T *buildByrefHelpers(CodeGenModule &CGM, llvm::StructType &byrefTy, + unsigned byrefValueIndex, T &byrefInfo) { // Increase the field's alignment to be at least pointer alignment, // since the layout of the byref struct will guarantee at least that. 
@@ -1839,26 +1859,33 @@ template <class T> static T *buildByrefHelpers(CodeGenModule &CGM, = CGM.ByrefHelpersCache.FindNodeOrInsertPos(id, insertPos); if (node) return static_cast<T*>(node); - byrefInfo.CopyHelper = buildByrefCopyHelper(CGM, byrefTy, byrefInfo); - byrefInfo.DisposeHelper = buildByrefDisposeHelper(CGM, byrefTy, byrefInfo); + byrefInfo.CopyHelper = + buildByrefCopyHelper(CGM, byrefTy, byrefValueIndex, byrefInfo); + byrefInfo.DisposeHelper = + buildByrefDisposeHelper(CGM, byrefTy, byrefValueIndex,byrefInfo); T *copy = new (CGM.getContext()) T(byrefInfo); CGM.ByrefHelpersCache.InsertNode(copy, insertPos); return copy; } +/// Build the copy and dispose helpers for the given __block variable +/// emission. Places the helpers in the global cache. Returns null +/// if no helpers are required. CodeGenModule::ByrefHelpers * CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType, const AutoVarEmission &emission) { const VarDecl &var = *emission.Variable; QualType type = var.getType(); + unsigned byrefValueIndex = getByRefValueLLVMField(&var); + if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) { const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var); if (!copyExpr && record->hasTrivialDestructor()) return 0; CXXByrefHelpers byrefInfo(emission.Alignment, type, copyExpr); - return ::buildByrefHelpers(CGM, byrefType, byrefInfo); + return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); } // Otherwise, if we don't have a retainable type, there's nothing to do. @@ -1883,7 +1910,7 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType, // byref routines. case Qualifiers::OCL_Weak: { ARCWeakByrefHelpers byrefInfo(emission.Alignment); - return ::buildByrefHelpers(CGM, byrefType, byrefInfo); + return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); } // ARC __strong __block variables need to be retained. 
@@ -1892,13 +1919,13 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType, // transfer possible. if (type->isBlockPointerType()) { ARCStrongBlockByrefHelpers byrefInfo(emission.Alignment); - return ::buildByrefHelpers(CGM, byrefType, byrefInfo); + return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); // Otherwise, we transfer ownership of the retain from the stack // to the heap. } else { ARCStrongByrefHelpers byrefInfo(emission.Alignment); - return ::buildByrefHelpers(CGM, byrefType, byrefInfo); + return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); } } llvm_unreachable("fell out of lifetime switch!"); @@ -1918,7 +1945,7 @@ CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType, flags |= BLOCK_FIELD_IS_WEAK; ObjectByrefHelpers byrefInfo(emission.Alignment, flags); - return ::buildByrefHelpers(CGM, byrefType, byrefInfo); + return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); } unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const { diff --git a/test/CodeGenObjCXX/arc-blocks.mm b/test/CodeGenObjCXX/arc-blocks.mm new file mode 100644 index 0000000000..810c0e09cc --- /dev/null +++ b/test/CodeGenObjCXX/arc-blocks.mm @@ -0,0 +1,49 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -fobjc-runtime-has-weak -fblocks -fobjc-arc -o - %s | FileCheck %s + +// CHECK: [[A:.*]] = type { i64, [10 x i8*] } + +// CHECK: [[LAYOUT0:@.*]] = internal global [3 x i8] c" 9\00" + +// rdar://13045269 +// If a __block variable requires extended layout information *and* +// a copy/dispose helper, be sure to adjust the offsets used in copy/dispose. 
+namespace test0 { + struct A { + unsigned long count; + id data[10]; + }; + + void foo() { + __block A v; + } + // CHECK: define void @_ZN5test03fooEv() + // CHECK: [[V:%.*]] = alloca [[BYREF_A:%.*]], align 8 + // CHECK: [[T0:%.*]] = getelementptr inbounds [[BYREF_A]]* [[V]], i32 0, i32 4 + // CHECK-NEXT: store i8* bitcast (void (i8*, i8*)* [[COPY_HELPER:@.*]] to i8*), i8** [[T0]] + // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[BYREF_A]]* [[V]], i32 0, i32 5 + // CHECK-NEXT: store i8* bitcast (void (i8*)* [[DISPOSE_HELPER:@.*]] to i8*), i8** [[T0]] + // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[BYREF_A]]* [[V]], i32 0, i32 6 + // CHECK-NEXT: store i8* getelementptr inbounds ([3 x i8]* [[LAYOUT0]], i32 0, i32 0), i8** [[T0]] + // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[BYREF_A]]* [[V]], i32 0, i32 7 + // CHECK-NEXT: call void @_ZN5test01AC1Ev([[A]]* [[T0]]) + // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[BYREF_A]]* [[V]], i32 0, i32 7 + // CHECK-NEXT: [[T1:%.*]] = bitcast [[BYREF_A]]* [[V]] to i8* + // CHECK-NEXT: call void @_Block_object_dispose(i8* [[T1]], i32 8) + // CHECK-NEXT: call void @_ZN5test01AD1Ev([[A]]* [[T0]]) + // CHECK-NEXT: ret void + + // CHECK: define internal void [[COPY_HELPER]]( + // CHECK: [[T0:%.*]] = bitcast i8* {{.*}} to [[BYREF_A]]* + // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[BYREF_A]]* [[T0]], i32 0, i32 7 + // CHECK-NEXT: load + // CHECK-NEXT: [[T2:%.*]] = bitcast i8* {{.*}} to [[BYREF_A]]* + // CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds [[BYREF_A]]* [[T2]], i32 0, i32 7 + // CHECK-NEXT: call void @_ZN5test01AC1ERKS0_([[A]]* [[T1]], [[A]]* [[T3]]) + // CHECK-NEXT: ret void + + // CHECK: define internal void [[DISPOSE_HELPER]]( + // CHECK: [[T0:%.*]] = bitcast i8* {{.*}} to [[BYREF_A]]* + // CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds [[BYREF_A]]* [[T0]], i32 0, i32 7 + // CHECK-NEXT: call void @_ZN5test01AD1Ev([[A]]* [[T1]]) + // CHECK-NEXT: ret void +} |