| author | Dmitry Vyukov <dvyukov@google.com> | 2012-11-27 08:09:25 +0000 |
|---|---|---|
| committer | Dmitry Vyukov <dvyukov@google.com> | 2012-11-27 08:09:25 +0000 |
| commit | b10675ef14bea530551172547e2111bf707a408e (patch) | |
| tree | 6153b0cac42c92e38d7a8d271f49bc78fb4432eb | |
| parent | 2cf4fb488437f2d9107a6ad280a215016a34c901 (diff) | |
tsan: instrument atomic nand operation
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@168684 91177308-0d34-0410-b5e6-96231b3b80d8
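For context: the pass rewrites `atomicrmw nand` into calls to `__tsan_atomic{8,16,32,64,128}_fetch_nand`, passing the memory ordering as an `i32` (0 = monotonic, 2 = acquire, 3 = release, 4 = acq_rel, 5 = seq_cst, as the CHECK lines below show). A minimal sketch of source code that produces such an instruction; the file name and function are hypothetical, and it assumes a Clang/GCC toolchain where the `__atomic_fetch_nand` builtin and `__ATOMIC_*` constants are available:

```cpp
// nand_example.cpp -- hypothetical example, not part of this commit.
// Build with: clang++ -fsanitize=thread -O1 -c nand_example.cpp
#include <cstdint>

uint8_t flags = 0xff;

uint8_t clear_some_bits(uint8_t mask) {
  // Emits "atomicrmw nand i8* @flags, i8 %mask seq_cst" in LLVM IR.
  // With this patch, the tsan pass rewrites that instruction into a call
  // to __tsan_atomic8_fetch_nand(&flags, mask, 5 /* seq_cst */).
  return __atomic_fetch_nand(&flags, mask, __ATOMIC_SEQ_CST);
}
```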
| -rw-r--r-- | lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 2 |
|---|---|---|
| -rw-r--r-- | test/Instrumentation/ThreadSanitizer/atomic.ll | 200 |

2 files changed, 202 insertions(+), 0 deletions(-)
```diff
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index d054b5e22f..cdfaedf47c 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -188,6 +188,8 @@ bool ThreadSanitizer::doInitialization(Module &M) {
       NamePart = "_fetch_or";
     else if (op == AtomicRMWInst::Xor)
       NamePart = "_fetch_xor";
+    else if (op == AtomicRMWInst::Nand)
+      NamePart = "_fetch_nand";
     else
       continue;
     SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart);
diff --git a/test/Instrumentation/ThreadSanitizer/atomic.ll b/test/Instrumentation/ThreadSanitizer/atomic.ll
index d9fc222f12..70b6cbbf31 100644
--- a/test/Instrumentation/ThreadSanitizer/atomic.ll
+++ b/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -114,6 +114,14 @@ entry:
 ; CHECK: atomic8_xor_monotonic
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 0)

+define void @atomic8_nand_monotonic(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 monotonic
+  ret void
+}
+; CHECK: atomic8_nand_monotonic
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 0)
+
 define void @atomic8_xchg_acquire(i8* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i8* %a, i8 0 acquire
@@ -162,6 +170,14 @@ entry:
 ; CHECK: atomic8_xor_acquire
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 2)

+define void @atomic8_nand_acquire(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 acquire
+  ret void
+}
+; CHECK: atomic8_nand_acquire
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 2)
+
 define void @atomic8_xchg_release(i8* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i8* %a, i8 0 release
@@ -210,6 +226,14 @@ entry:
 ; CHECK: atomic8_xor_release
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 3)

+define void @atomic8_nand_release(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 release
+  ret void
+}
+; CHECK: atomic8_nand_release
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 3)
+
 define void @atomic8_xchg_acq_rel(i8* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i8* %a, i8 0 acq_rel
@@ -258,6 +282,14 @@ entry:
 ; CHECK: atomic8_xor_acq_rel
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 4)

+define void @atomic8_nand_acq_rel(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 acq_rel
+  ret void
+}
+; CHECK: atomic8_nand_acq_rel
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 4)
+
 define void @atomic8_xchg_seq_cst(i8* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i8* %a, i8 0 seq_cst
@@ -306,6 +338,14 @@ entry:
 ; CHECK: atomic8_xor_seq_cst
 ; CHECK: call i8 @__tsan_atomic8_fetch_xor(i8* %a, i8 0, i32 5)

+define void @atomic8_nand_seq_cst(i8* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i8* %a, i8 0 seq_cst
+  ret void
+}
+; CHECK: atomic8_nand_seq_cst
+; CHECK: call i8 @__tsan_atomic8_fetch_nand(i8* %a, i8 0, i32 5)
+
 define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable {
 entry:
   cmpxchg i8* %a, i8 0, i8 1 monotonic
@@ -458,6 +498,14 @@ entry:
 ; CHECK: atomic16_xor_monotonic
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 0)

+define void @atomic16_nand_monotonic(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 monotonic
+  ret void
+}
+; CHECK: atomic16_nand_monotonic
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 0)
+
 define void @atomic16_xchg_acquire(i16* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i16* %a, i16 0 acquire
@@ -506,6 +554,14 @@ entry:
 ; CHECK: atomic16_xor_acquire
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 2)

+define void @atomic16_nand_acquire(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 acquire
+  ret void
+}
+; CHECK: atomic16_nand_acquire
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 2)
+
 define void @atomic16_xchg_release(i16* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i16* %a, i16 0 release
@@ -554,6 +610,14 @@ entry:
 ; CHECK: atomic16_xor_release
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 3)

+define void @atomic16_nand_release(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 release
+  ret void
+}
+; CHECK: atomic16_nand_release
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 3)
+
 define void @atomic16_xchg_acq_rel(i16* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i16* %a, i16 0 acq_rel
@@ -602,6 +666,14 @@ entry:
 ; CHECK: atomic16_xor_acq_rel
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 4)

+define void @atomic16_nand_acq_rel(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 acq_rel
+  ret void
+}
+; CHECK: atomic16_nand_acq_rel
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 4)
+
 define void @atomic16_xchg_seq_cst(i16* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i16* %a, i16 0 seq_cst
@@ -650,6 +722,14 @@ entry:
 ; CHECK: atomic16_xor_seq_cst
 ; CHECK: call i16 @__tsan_atomic16_fetch_xor(i16* %a, i16 0, i32 5)

+define void @atomic16_nand_seq_cst(i16* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i16* %a, i16 0 seq_cst
+  ret void
+}
+; CHECK: atomic16_nand_seq_cst
+; CHECK: call i16 @__tsan_atomic16_fetch_nand(i16* %a, i16 0, i32 5)
+
 define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable {
 entry:
   cmpxchg i16* %a, i16 0, i16 1 monotonic
@@ -802,6 +882,14 @@ entry:
 ; CHECK: atomic32_xor_monotonic
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 0)

+define void @atomic32_nand_monotonic(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 monotonic
+  ret void
+}
+; CHECK: atomic32_nand_monotonic
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 0)
+
 define void @atomic32_xchg_acquire(i32* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i32* %a, i32 0 acquire
@@ -850,6 +938,14 @@ entry:
 ; CHECK: atomic32_xor_acquire
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 2)

+define void @atomic32_nand_acquire(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 acquire
+  ret void
+}
+; CHECK: atomic32_nand_acquire
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 2)
+
 define void @atomic32_xchg_release(i32* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i32* %a, i32 0 release
@@ -898,6 +994,14 @@ entry:
 ; CHECK: atomic32_xor_release
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 3)

+define void @atomic32_nand_release(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 release
+  ret void
+}
+; CHECK: atomic32_nand_release
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 3)
+
 define void @atomic32_xchg_acq_rel(i32* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i32* %a, i32 0 acq_rel
@@ -946,6 +1050,14 @@ entry:
 ; CHECK: atomic32_xor_acq_rel
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 4)

+define void @atomic32_nand_acq_rel(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 acq_rel
+  ret void
+}
+; CHECK: atomic32_nand_acq_rel
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 4)
+
 define void @atomic32_xchg_seq_cst(i32* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i32* %a, i32 0 seq_cst
@@ -994,6 +1106,14 @@ entry:
 ; CHECK: atomic32_xor_seq_cst
 ; CHECK: call i32 @__tsan_atomic32_fetch_xor(i32* %a, i32 0, i32 5)

+define void @atomic32_nand_seq_cst(i32* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i32* %a, i32 0 seq_cst
+  ret void
+}
+; CHECK: atomic32_nand_seq_cst
+; CHECK: call i32 @__tsan_atomic32_fetch_nand(i32* %a, i32 0, i32 5)
+
 define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable {
 entry:
   cmpxchg i32* %a, i32 0, i32 1 monotonic
@@ -1146,6 +1266,14 @@ entry:
 ; CHECK: atomic64_xor_monotonic
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 0)

+define void @atomic64_nand_monotonic(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 monotonic
+  ret void
+}
+; CHECK: atomic64_nand_monotonic
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 0)
+
 define void @atomic64_xchg_acquire(i64* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i64* %a, i64 0 acquire
@@ -1194,6 +1322,14 @@ entry:
 ; CHECK: atomic64_xor_acquire
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 2)

+define void @atomic64_nand_acquire(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 acquire
+  ret void
+}
+; CHECK: atomic64_nand_acquire
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 2)
+
 define void @atomic64_xchg_release(i64* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i64* %a, i64 0 release
@@ -1242,6 +1378,14 @@ entry:
 ; CHECK: atomic64_xor_release
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 3)

+define void @atomic64_nand_release(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 release
+  ret void
+}
+; CHECK: atomic64_nand_release
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 3)
+
 define void @atomic64_xchg_acq_rel(i64* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i64* %a, i64 0 acq_rel
@@ -1290,6 +1434,14 @@ entry:
 ; CHECK: atomic64_xor_acq_rel
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 4)

+define void @atomic64_nand_acq_rel(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 acq_rel
+  ret void
+}
+; CHECK: atomic64_nand_acq_rel
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 4)
+
 define void @atomic64_xchg_seq_cst(i64* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i64* %a, i64 0 seq_cst
@@ -1338,6 +1490,14 @@ entry:
 ; CHECK: atomic64_xor_seq_cst
 ; CHECK: call i64 @__tsan_atomic64_fetch_xor(i64* %a, i64 0, i32 5)

+define void @atomic64_nand_seq_cst(i64* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i64* %a, i64 0 seq_cst
+  ret void
+}
+; CHECK: atomic64_nand_seq_cst
+; CHECK: call i64 @__tsan_atomic64_fetch_nand(i64* %a, i64 0, i32 5)
+
 define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable {
 entry:
   cmpxchg i64* %a, i64 0, i64 1 monotonic
@@ -1490,6 +1650,14 @@ entry:
 ; CHECK: atomic128_xor_monotonic
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 0)

+define void @atomic128_nand_monotonic(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 monotonic
+  ret void
+}
+; CHECK: atomic128_nand_monotonic
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 0)
+
 define void @atomic128_xchg_acquire(i128* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i128* %a, i128 0 acquire
@@ -1538,6 +1706,14 @@ entry:
 ; CHECK: atomic128_xor_acquire
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 2)

+define void @atomic128_nand_acquire(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 acquire
+  ret void
+}
+; CHECK: atomic128_nand_acquire
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 2)
+
 define void @atomic128_xchg_release(i128* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i128* %a, i128 0 release
@@ -1586,6 +1762,14 @@ entry:
 ; CHECK: atomic128_xor_release
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 3)

+define void @atomic128_nand_release(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 release
+  ret void
+}
+; CHECK: atomic128_nand_release
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 3)
+
 define void @atomic128_xchg_acq_rel(i128* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i128* %a, i128 0 acq_rel
@@ -1634,6 +1818,14 @@ entry:
 ; CHECK: atomic128_xor_acq_rel
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 4)

+define void @atomic128_nand_acq_rel(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 acq_rel
+  ret void
+}
+; CHECK: atomic128_nand_acq_rel
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 4)
+
 define void @atomic128_xchg_seq_cst(i128* %a) nounwind uwtable {
 entry:
   atomicrmw xchg i128* %a, i128 0 seq_cst
@@ -1682,6 +1874,14 @@ entry:
 ; CHECK: atomic128_xor_seq_cst
 ; CHECK: call i128 @__tsan_atomic128_fetch_xor(i128* %a, i128 0, i32 5)

+define void @atomic128_nand_seq_cst(i128* %a) nounwind uwtable {
+entry:
+  atomicrmw nand i128* %a, i128 0 seq_cst
+  ret void
+}
+; CHECK: atomic128_nand_seq_cst
+; CHECK: call i128 @__tsan_atomic128_fetch_nand(i128* %a, i128 0, i32 5)
+
 define void @atomic128_cas_monotonic(i128* %a) nounwind uwtable {
 entry:
   cmpxchg i128* %a, i128 0, i128 1 monotonic
```
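For reference, LLVM's `atomicrmw nand` computes `*p = ~(*p & v)` and yields the previous value, so that is the operation each `__tsan_atomicN_fetch_nand` callback must implement. A non-atomic model of the value computation only; this is a hypothetical sketch, not the actual tsan runtime, which performs the update atomically and reports the access to the race detector:

```cpp
#include <cstdint>

// Hypothetical reference model of __tsan_atomic8_fetch_nand's arithmetic.
// The real runtime does this atomically and records the memory access;
// this sketch shows only the value semantics being tested above.
uint8_t model_fetch_nand(uint8_t *a, uint8_t v) {
  uint8_t old = *a;
  *a = ~(old & v);  // LLVM 'nand': complement of the AND
  return old;       // fetch_* operations return the value before the update
}
```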