-rw-r--r--  lib/CodeGen/CGBuiltin.cpp       | 11
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp   | 39
-rw-r--r--  test/CodeGen/Atomics.c          | 17
-rw-r--r--  test/CodeGen/atomic.c           |  6
-rw-r--r--  test/CodeGenCXX/static-init.cpp |  4
5 files changed, 31 insertions, 46 deletions
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 0c93ed36c1..271fe66ffe 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -900,13 +900,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
   case Builtin::BI__sync_lock_release_8:
   case Builtin::BI__sync_lock_release_16: {
     Value *Ptr = EmitScalarExpr(E->getArg(0));
-    llvm::Type *ElTy =
+    llvm::Type *ElLLVMTy =
       cast<llvm::PointerType>(Ptr->getType())->getElementType();
     llvm::StoreInst *Store =
-      Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
-    // FIXME: This is completely, utterly wrong; it only even sort-of works
-    // on x86.
-    Store->setVolatile(true);
+      Builder.CreateStore(llvm::Constant::getNullValue(ElLLVMTy), Ptr);
+    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+    Store->setAlignment(StoreSize.getQuantity());
+    Store->setAtomic(llvm::Release);
     return RValue::get(0);
   }
 
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index cbcc4491c5..d0ec3eb4fa 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -1130,20 +1130,27 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
   } else {
     // Load the first byte of the guard variable.
     llvm::Type *PtrTy = Builder.getInt8PtrTy();
-    llvm::Value *V =
+    llvm::LoadInst *LI =
       Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
-
-    IsInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+    LI->setAlignment(1);
+
+    // Itanium ABI:
+    //   An implementation supporting thread-safety on multiprocessor
+    //   systems must also guarantee that references to the initialized
+    //   object do not occur before the load of the initialization flag.
+    //
+    // In LLVM, we do this by marking the load Acquire.
+    if (threadsafe)
+      LI->setAtomic(llvm::Acquire);
+
+    IsInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
   }
 
   llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
   llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
 
-  llvm::BasicBlock *NoCheckBlock = EndBlock;
-  if (threadsafe) NoCheckBlock = CGF.createBasicBlock("init.barrier");
-
   // Check if the first byte of the guard variable is zero.
-  Builder.CreateCondBr(IsInitialized, InitCheckBlock, NoCheckBlock);
+  Builder.CreateCondBr(IsInitialized, InitCheckBlock, EndBlock);
 
   CGF.EmitBlock(InitCheckBlock);
 
@@ -1177,23 +1184,5 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
       Builder.CreateStore(llvm::ConstantInt::get(GuardTy, 1), GuardVariable);
   }
 
-  // Emit an acquire memory barrier if using thread-safe statics:
-  //   Itanium ABI:
-  //     An implementation supporting thread-safety on multiprocessor
-  //     systems must also guarantee that references to the initialized
-  //     object do not occur before the load of the initialization flag.
-  if (threadsafe) {
-    Builder.CreateBr(EndBlock);
-    CGF.EmitBlock(NoCheckBlock);
-
-    llvm::Value *_false = Builder.getFalse();
-    llvm::Value *_true = Builder.getTrue();
-
-    Builder.CreateCall5(CGM.getIntrinsic(llvm::Intrinsic::memory_barrier),
-                        /* load-load, load-store */ _true, _true,
-                        /* store-load, store-store */ _false, _false,
-                        /* device or I/O */ _false);
-  }
-
   CGF.EmitBlock(EndBlock);
 }
diff --git a/test/CodeGen/Atomics.c b/test/CodeGen/Atomics.c
index e5a5812f46..c440b6c190 100644
--- a/test/CodeGen/Atomics.c
+++ b/test/CodeGen/Atomics.c
@@ -192,13 +192,12 @@ void test_lock (void)
 
   __sync_synchronize (); // CHECK: fence seq_cst
 
-  // FIXME: These are wrong!
-  __sync_lock_release (&sc); // CHECK: store volatile
-  __sync_lock_release (&uc); // CHECK: store volatile
-  __sync_lock_release (&ss); // CHECK: store volatile
-  __sync_lock_release (&us); // CHECK: store volatile
-  __sync_lock_release (&si); // CHECK: store volatile
-  __sync_lock_release (&ui); // CHECK: store volatile
-  __sync_lock_release (&sll); // CHECK: store volatile
-  __sync_lock_release (&ull); // CHECK: store volatile
+  __sync_lock_release (&sc); // CHECK: store atomic {{.*}} release, align 1
+  __sync_lock_release (&uc); // CHECK: store atomic {{.*}} release, align 1
+  __sync_lock_release (&ss); // CHECK: store atomic {{.*}} release, align 2
+  __sync_lock_release (&us); // CHECK: store atomic {{.*}} release, align 2
+  __sync_lock_release (&si); // CHECK: store atomic {{.*}} release, align 4
+  __sync_lock_release (&ui); // CHECK: store atomic {{.*}} release, align 4
+  __sync_lock_release (&sll); // CHECK: store atomic {{.*}} release, align 8
+  __sync_lock_release (&ull); // CHECK: store atomic {{.*}} release, align 8
 }
diff --git a/test/CodeGen/atomic.c b/test/CodeGen/atomic.c
index a0adac8e1c..c8f4fd09bb 100644
--- a/test/CodeGen/atomic.c
+++ b/test/CodeGen/atomic.c
@@ -75,8 +75,7 @@ int atomic(void) {
   // CHECK: cmpxchg i32* null, i32 0, i32 0 seq_cst
 
   __sync_lock_release(&val);
-  // FIXME: WRONG!
-  // CHECK: store volatile i32 0, i32*
+  // CHECK: store atomic {{.*}} release, align 4
 
   __sync_synchronize ();
   // CHECK: fence seq_cst
@@ -88,8 +87,7 @@ int atomic(void) {
 void release_return(int *lock) {
   // Ensure this is actually returning void all the way through.
   return __sync_lock_release(lock);
-  // FIXME: WRONG!
-  // CHECK: store volatile i32 0, i32*
+  // CHECK: store atomic {{.*}} release, align 4
 }
diff --git a/test/CodeGenCXX/static-init.cpp b/test/CodeGenCXX/static-init.cpp
index d488e63669..9e2673cc96 100644
--- a/test/CodeGenCXX/static-init.cpp
+++ b/test/CodeGenCXX/static-init.cpp
@@ -12,13 +12,11 @@ struct A {
 };
 
 void f() {
+  // CHECK: load atomic i8* bitcast (i64* @_ZGVZ1fvE1a to i8*) acquire, align 1
   // CHECK: call i32 @__cxa_guard_acquire
   // CHECK: call void @_ZN1AC1Ev
   // CHECK: call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.A*)* @_ZN1AD1Ev to void (i8*)*), i8* getelementptr inbounds (%struct.A* @_ZZ1fvE1a, i32 0, i32 0), i8* bitcast (i8** @__dso_handle to i8*))
   // CHECK: call void @__cxa_guard_release
-
-  // rdar://problem/9496726
-  // CHECK: call void @llvm.memory.barrier(i1 true, i1 true, i1 false, i1 false, i1 false)
   static A a;
 }
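
A note on the __sync_lock_release change above: the builtin is defined as a release operation on the lock word, not a volatile store, which is why the lowering now emits "store atomic ... release" at the natural alignment of the pointee type. The sketch below is not part of the patch (lock_byte, spin_lock and spin_unlock are hypothetical names); it only illustrates the usage pattern the ordering matters for, a test-and-set spinlock built from the __sync builtins exercised in the tests above.

// Minimal sketch, assuming only the GCC/Clang __sync builtins; none of these
// names appear in the patch itself.
static char lock_byte;                              // hypothetical lock word

void spin_lock(void) {
  // __sync_lock_test_and_set is an acquire barrier: accesses in the critical
  // section cannot be hoisted above it.
  while (__sync_lock_test_and_set(&lock_byte, 1))
    ;                                               // spin until we own the lock
}

void spin_unlock(void) {
  // With this patch the unlock lowers to
  //   store atomic i8 0, i8* @lock_byte release, align 1
  // so stores made inside the critical section cannot sink past the unlock,
  // without pretending the store is volatile.
  __sync_lock_release(&lock_byte);
}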
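Similarly for the ItaniumCXXABI change: the fast path of a thread-safe local static now performs an acquire load of the guard byte instead of branching to a separate init.barrier block that called llvm.memory.barrier. A minimal sketch of the C++ pattern this affects follows; Widget and instance are illustrative names, not taken from the patch.

// Minimal sketch of a guarded local static; the names here are hypothetical.
struct Widget {
  Widget();              // non-trivial constructor forces guarded initialization
};

Widget &instance() {
  // Fast path after this patch: a "load atomic i8* ... acquire, align 1" of the
  // guard byte, then a branch; the slow path still calls __cxa_guard_acquire
  // and __cxa_guard_release, as checked in static-init.cpp above.
  static Widget w;
  return w;
}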