-rw-r--r--  lib/CodeGen/CGBuiltin.cpp    25
-rw-r--r--  lib/CodeGen/CGStmt.cpp       12
-rw-r--r--  lib/CodeGen/TargetInfo.cpp    2
-rw-r--r--  lib/CodeGen/TargetInfo.h     22
-rw-r--r--  test/CodeGen/NaCl/atomics.c  64
5 files changed, 124 insertions, 1 deletions
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index a655966d6e..20140034b8 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -20,6 +20,7 @@
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h" // @LOCALMOD
#include "llvm/IR/Intrinsics.h"
using namespace clang;
@@ -1034,7 +1035,29 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     // any way to safely use it... but in practice, it mostly works
     // to use it with non-atomic loads and stores to get acquire/release
     // semantics.
-    Builder.CreateFence(llvm::SequentiallyConsistent);
+    // @LOCALMOD-START
+    // Targets can ask that ``__sync_synchronize()`` be surrounded with
+    // compiler fences. This should enforce ordering of more than just
+    // atomic memory accesses, though it won't guarantee that all
+    // accesses (e.g. those to non-escaping objects) won't be reordered.
+    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+    std::string AsmString; // Empty.
+    std::string Constraints("~{memory}");
+    bool HasSideEffect = true;
+    if (getTargetHooks().addAsmMemoryAroundSyncSynchronize()) {
+      Builder.CreateCall(
+          llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect))->
+          addAttribute(llvm::AttributeSet::FunctionIndex,
+                       llvm::Attribute::NoUnwind);
+      Builder.CreateFence(llvm::SequentiallyConsistent);
+      Builder.CreateCall(
+          llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect))->
+          addAttribute(llvm::AttributeSet::FunctionIndex,
+                       llvm::Attribute::NoUnwind);
+    } else {
+      Builder.CreateFence(llvm::SequentiallyConsistent);
+    }
+    // @LOCALMOD-END
     return RValue::get(0);
   }
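
The hunk above emits the same empty inline-asm memory clobber twice, and the CGStmt.cpp change below emits it once more. As a rough illustration only, the repeated pattern boils down to the helper sketched here; the helper name EmitAsmMemoryClobber is hypothetical, not part of this patch, and the sketch assumes the headers and using-directives already present in CGBuiltin.cpp:

    // Hypothetical helper (not in this patch): emit
    // ``call void asm sideeffect "", "~{memory}"()``, an empty inline-asm
    // call that clobbers all of memory and acts as a compiler-only barrier
    // around the real seq_cst fence.
    static void EmitAsmMemoryClobber(CodeGenFunction &CGF) {
      llvm::FunctionType *FTy =
          llvm::FunctionType::get(CGF.VoidTy, /*isVarArg=*/false);
      llvm::CallInst *Call = CGF.Builder.CreateCall(
          llvm::InlineAsm::get(FTy, /*AsmString=*/"",
                               /*Constraints=*/"~{memory}",
                               /*hasSideEffects=*/true));
      Call->addAttribute(llvm::AttributeSet::FunctionIndex,
                         llvm::Attribute::NoUnwind);
    }

With a helper like this, the ``if`` branch above reduces to clobber, fence, clobber, and the EmitAsmStmt change below would reuse the same clobber after its fence.
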
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 5e2ebe0d9c..a381736206 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -1701,6 +1701,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
   Result->addAttribute(llvm::AttributeSet::FunctionIndex,
                        llvm::Attribute::NoUnwind);
 
+  // @LOCALMOD-START
+  if (getTargetHooks().asmMemoryIsFence() && IA->isAsmMemory()) {
+    // Targets can ask that ``asm("":::"memory")`` be treated like
+    // ``__sync_synchronize()``.
+    Builder.CreateFence(llvm::SequentiallyConsistent);
+    Builder.CreateCall(
+        llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect))->
+        addAttribute(llvm::AttributeSet::FunctionIndex,
+                     llvm::Attribute::NoUnwind);
+  }
+  // @LOCALMOD-END
+
   // Slap the source location of the inline asm into a !srcloc metadata on the
   // call. FIXME: Handle metadata for MS-style inline asms.
   if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
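
InlineAsm::isAsmMemory() used above is a @LOCALMOD that lives in the NaCl fork of LLVM rather than in this Clang patch. Judging from the comment and the tests below, it recognizes the canonical ``asm("":::"memory")`` form; a minimal sketch of what such a predicate could look like, with the exact signature and logic being assumptions rather than code taken from the fork (the declaration would live in llvm/IR/InlineAsm.h there):

    // Hypothetical sketch: treat an inline asm as an "asm memory" barrier
    // when it has an empty template, its only constraint is the memory
    // clobber, and it is side-effecting (volatile), which is what
    // asm("":::"memory") lowers to.
    bool llvm::InlineAsm::isAsmMemory() const {
      return getAsmString().empty() &&
             getConstraintString() == "~{memory}" &&
             hasSideEffects();
    }
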
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index f9689affa0..4fb888a767 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -437,6 +437,8 @@ class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
   /// For PNaCl we don't want llvm.pow.* intrinsics to be emitted instead
   /// of library function calls.
   bool emitIntrinsicForPow() const { return false; }
+  bool addAsmMemoryAroundSyncSynchronize() const { return true; } // @LOCALMOD
+  bool asmMemoryIsFence() const { return true; } // @LOCALMOD
 };
 
 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index a682c183f0..a122e882e2 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -171,6 +171,28 @@ namespace clang {
   /// that unprototyped calls to varargs functions still succeed.
   virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
                                      const FunctionNoProtoType *fnType) const;
+
+  // @LOCALMOD-START
+  /// Determine whether the sequentially consistent fence generated for
+  /// the legacy GCC-style ``__sync_synchronize()`` builtin should be
+  /// surrounded by empty assembly directives which touch all of
+  /// memory. This allows platforms which aim for portability to
+  /// isolate themselves from changes in the sequentially consistent
+  /// fence's semantics, since its intent is to represent the
+  /// C11/C++11 memory model, which only orders atomic memory accesses.
+  /// This won't guarantee that all accesses (e.g. those to
+  /// non-escaping objects) will not be reordered.
+  virtual bool addAsmMemoryAroundSyncSynchronize() const {
+    return false;
+  }
+
+  /// Determine whether a full sequentially consistent fence should be
+  /// emitted when ``asm("":::"memory")`` is encountered, treating it
+  /// like ``__sync_synchronize()``.
+  virtual bool asmMemoryIsFence() const {
+    return false;
+  }
+  // @LOCALMOD-END
 };
}
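
Both hooks default to false, so a target that overrides neither keeps the existing behaviour: ``__sync_synchronize()`` stays a bare seq_cst fence and ``asm("":::"memory")`` stays a compiler-only barrier. PNaCl opts into both in TargetInfo.cpp above. As an illustration that the two hooks are independent, a target could also enable only one of them; the class below is made up for this example and is not part of the patch:

    // Hypothetical TargetCodeGenInfo subclass (not in this patch). It
    // brackets __sync_synchronize() with asm memory clobbers but leaves
    // asm("":::"memory") alone, i.e. asmMemoryIsFence() keeps its default
    // of false. Ownership of the ABIInfo follows the base class.
    class ExamplePortableTargetCodeGenInfo : public TargetCodeGenInfo {
    public:
      ExamplePortableTargetCodeGenInfo(ABIInfo *Info)
          : TargetCodeGenInfo(Info) {}
      virtual bool addAsmMemoryAroundSyncSynchronize() const { return true; }
    };
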
diff --git a/test/CodeGen/NaCl/atomics.c b/test/CodeGen/NaCl/atomics.c
new file mode 100644
index 0000000000..9922e93630
--- /dev/null
+++ b/test/CodeGen/NaCl/atomics.c
@@ -0,0 +1,64 @@
+// Test frontend handling of synchronization builtins which NaCl handles
+// differently.
+// Modified from test/CodeGen/Atomics.c
+// RUN: %clang_cc1 -triple le32-unknown-nacl -emit-llvm %s -o - | FileCheck %s
+
+// CHECK: define void @test_sync_synchronize()
+// CHECK-NEXT: entry:
+void test_sync_synchronize (void)
+{
+ __sync_synchronize ();
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+ // CHECK-NEXT: fence seq_cst
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+
+ // CHECK-NEXT: ret void
+}
+
+// CHECK: define void @test_asm_memory_1()
+// CHECK-NEXT: entry:
+void test_asm_memory_1 (void)
+{
+ asm ("":::"memory");
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+ // CHECK-NEXT: fence seq_cst
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+
+ // CHECK-NEXT: ret void
+}
+
+// CHECK: define void @test_asm_memory_2()
+// CHECK-NEXT: entry:
+void test_asm_memory_2 (void)
+{
+ asm volatile ("":::"memory");
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+ // CHECK-NEXT: fence seq_cst
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+
+ // CHECK-NEXT: ret void
+}
+
+// CHECK: define void @test_asm_memory_3()
+// CHECK-NEXT: entry:
+void test_asm_memory_3 (void)
+{
+ __asm__ ("":::"memory");
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+ // CHECK-NEXT: fence seq_cst
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+
+ // CHECK-NEXT: ret void
+}
+
+// CHECK: define void @test_asm_memory_4()
+// CHECK-NEXT: entry:
+void test_asm_memory_4 (void)
+{
+ __asm__ __volatile__ ("":::"memory");
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+ // CHECK-NEXT: fence seq_cst
+ // CHECK-NEXT: call void asm sideeffect "", "~{memory}"()
+
+ // CHECK-NEXT: ret void
+}