author    JF Bastien <jfb@chromium.org>  2013-08-07 15:50:44 -0700
committer JF Bastien <jfb@chromium.org>  2013-08-07 15:50:44 -0700
commit    3b1ef29c338890ce55a1990389d8e63f6be7da2c (patch)
tree      6a12c31173392c21830fdcd251e67907770dcede /lib/CodeGen/CGBuiltin.cpp
parent    e801395572ecfeb5ba913c4064944ed7fac3e806 (diff)
Treat __sync_synchronize and asm("":::"memory") as stronger fences.
This is a companion patch to:
  https://codereview.chromium.org/22240002/
  https://codereview.chromium.org/22474008/
and deals with the Clang side of things. The above patches will handle the
fallout of this Clang patch, including some changes to un-duplicate work that
RewriteAsmDirectives.cpp does.

The goal of this patch is to force some extra ordering on non-atomics for
le32, which LLVM doesn't necessarily provide.

R=eliben@chromium.org
TEST= ninja check-all
BUG= https://code.google.com/p/nativeclient/issues/detail?id=3475
BUG= https://code.google.com/p/nativeclient/issues/detail?id=3611
Review URL: https://codereview.chromium.org/22294002
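For reference, here is a minimal sketch of the source-level constructs this
patch strengthens. The function names and values are illustrative only, not
part of the patch:

    /* Illustrative only: the two constructs treated as stronger fences. */
    void producer(int *data, int *ready) {
      *data = 42;
      __sync_synchronize(); /* on targets that opt in (e.g. le32), this is now
                               emitted with compiler memory clobbers around
                               the fence */
      *ready = 1;
    }

    void compiler_barrier(void) {
      __asm__ __volatile__("" ::: "memory"); /* compiler-only memory barrier */
    }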
Diffstat (limited to 'lib/CodeGen/CGBuiltin.cpp')
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 25
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index a655966d6e..20140034b8 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -20,6 +20,7 @@
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h" // @LOCALMOD
#include "llvm/IR/Intrinsics.h"
using namespace clang;
@@ -1034,7 +1035,29 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// any way to safely use it... but in practice, it mostly works
// to use it with non-atomic loads and stores to get acquire/release
// semantics.
- Builder.CreateFence(llvm::SequentiallyConsistent);
+ // @LOCALMOD-START
+ // Targets can ask that ``__sync_synchronize()`` be surrounded with
+ // compiler fences. This should enforce ordering of more than just
+ // atomic memory accesses, though it won't guarantee that all
+ // accesses (e.g. those to non-escaping objects) won't be reordered.
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ std::string AsmString; // Empty.
+ std::string Constraints("~{memory}");
+ bool HasSideEffect = true;
+ if (getTargetHooks().addAsmMemoryAroundSyncSynchronize()) {
+ Builder.CreateCall(
+ llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect))->
+ addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind);
+ Builder.CreateFence(llvm::SequentiallyConsistent);
+ Builder.CreateCall(
+ llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect))->
+ addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoUnwind);
+ } else {
+ Builder.CreateFence(llvm::SequentiallyConsistent);
+ }
+ // @LOCALMOD-END
return RValue::get(0);
}
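For clarity, the same emission pattern as a self-contained sketch against the
LLVM 3.3-era C++ API. The helper name and surrounding setup are assumptions
for illustration, not code from this patch, and the NoUnwind attribute set in
the actual change is omitted:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/InlineAsm.h"

    // Hypothetical helper mirroring the @LOCALMOD pattern above: an empty
    // inline-asm call with a "~{memory}" clobber on each side of a seq_cst
    // fence.
    static void emitStrongSyncSynchronize(llvm::IRBuilder<> &Builder) {
      llvm::LLVMContext &Ctx = Builder.getContext();
      llvm::FunctionType *FTy =
          llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), false);
      // The asm string is empty; the "~{memory}" constraint alone tells the
      // compiler it may not move memory accesses across the call.
      llvm::InlineAsm *IA =
          llvm::InlineAsm::get(FTy, "", "~{memory}", /*hasSideEffects=*/true);
      Builder.CreateCall(IA);
      Builder.CreateFence(llvm::SequentiallyConsistent);
      Builder.CreateCall(IA);
    }

The fence by itself only orders atomic operations; the surrounding asm calls
additionally keep the compiler from migrating ordinary loads and stores across
the barrier, which is the extra ordering le32 asks for. As the comment in the
patch notes, even this cannot constrain accesses to objects the compiler can
prove never escape.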