From ab78ac1a2d55cd81e42f4688fbaa7bd689e6291f Mon Sep 17 00:00:00 2001
From: Dmitry Vyukov
Date: Fri, 22 Mar 2013 08:51:22 +0000
Subject: tsan: handle vptr loads specially

This is required to determine ctor/dtor vs virtual call races.
http://llvm-reviews.chandlerc.com/D566

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@177717 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'lib/Transforms/Instrumentation/ThreadSanitizer.cpp')

diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index f93c5ab4c8..40e0908cf5 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -63,6 +63,7 @@ STATISTIC(NumOmittedReadsBeforeWrite,
           "Number of reads ignored due to following writes");
 STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size");
 STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes");
+STATISTIC(NumInstrumentedVtableReads, "Number of vtable ptr reads");
 STATISTIC(NumOmittedReadsFromConstantGlobals,
           "Number of reads from constant globals");
 STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
@@ -108,6 +109,7 @@ struct ThreadSanitizer : public FunctionPass {
   Function *TsanAtomicThreadFence;
   Function *TsanAtomicSignalFence;
   Function *TsanVptrUpdate;
+  Function *TsanVptrLoad;
 };
 }  // namespace
@@ -196,6 +198,8 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
   TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction(
       "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(),
       IRB.getInt8PtrTy(), NULL));
+  TsanVptrLoad = checkInterfaceFunction(M.getOrInsertFunction(
+      "__tsan_vptr_read", IRB.getVoidTy(), IRB.getInt8PtrTy(), NULL));
   TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction(
       "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
   TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
       "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));
@@ -386,6 +390,12 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I) {
     NumInstrumentedVtableWrites++;
     return true;
   }
+  if (!IsWrite && isVtableAccess(I)) {
+    IRB.CreateCall(TsanVptrLoad,
+                   IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
+    NumInstrumentedVtableReads++;
+    return true;
+  }
   Value *OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
   IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
   if (IsWrite) NumInstrumentedWrites++;
--
cgit v1.2.3-18-g5258
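For background, the ctor/dtor vs virtual call races mentioned in the commit message look roughly like the sketch below. It is an illustrative repro only; the class names and threading setup are invented, not taken from the patch. One thread makes a virtual call, which loads the object's vptr, while another thread destroys the object, whose destructors store the vptr as each base class is torn down.

// Illustrative repro only; not part of the patch.
#include <thread>

struct Base {
  virtual void work() {}
  virtual ~Base() {}
};

struct Derived : Base {
  void work() override {}
};

int main() {
  Base *obj = new Derived;
  std::thread t([obj] { obj->work(); });  // virtual call: loads obj's vptr
  delete obj;                             // ~Derived/~Base: store the vptr
  t.join();
  return 0;
}

Built with clang++ -fsanitize=thread, the vptr load at the call site conflicts with the vptr stores in the destructors; routing the load through __tsan_vptr_read, instead of an ordinary __tsan_readN call, is what lets the run-time classify the report as a destruction-vs-virtual-call race.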
From f464481db0c3b5404004b510921ca454803fd1d0 Mon Sep 17 00:00:00 2001
From: Kostya Serebryany
Date: Thu, 28 Mar 2013 11:21:13 +0000
Subject: [tsan] make sure memset/memcpy/memmove are not inlined in tsan mode

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@178230 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 52 ++++++++++++++++++++++
 1 file changed, 52 insertions(+)

(limited to 'lib/Transforms/Instrumentation/ThreadSanitizer.cpp')

diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 40e0908cf5..299060a42f 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -30,6 +30,7 @@
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Metadata.h"
@@ -56,6 +57,9 @@ static cl::opt<bool> ClInstrumentFuncEntryExit(
 static cl::opt<bool> ClInstrumentAtomics(
     "tsan-instrument-atomics", cl::init(true),
     cl::desc("Instrument atomics"), cl::Hidden);
+static cl::opt<bool> ClInstrumentMemIntrinsics(
+    "tsan-instrument-memintrinsics", cl::init(true),
+    cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);

 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
@@ -86,12 +90,14 @@ struct ThreadSanitizer : public FunctionPass {
   void initializeCallbacks(Module &M);
   bool instrumentLoadOrStore(Instruction *I);
   bool instrumentAtomic(Instruction *I);
+  bool instrumentMemIntrinsic(Instruction *I);
   void chooseInstructionsToInstrument(SmallVectorImpl<Instruction*> &Local,
                                       SmallVectorImpl<Instruction*> &All);
   bool addrPointsToConstantData(Value *Addr);
   int getMemoryAccessFuncIndex(Value *Addr);

   DataLayout *TD;
+  Type *IntptrTy;
   SmallString<64> BlacklistFile;
   OwningPtr<BlackList> BL;
   IntegerType *OrdTy;
@@ -110,6 +116,7 @@ struct ThreadSanitizer : public FunctionPass {
   Function *TsanAtomicSignalFence;
   Function *TsanVptrUpdate;
   Function *TsanVptrLoad;
+  Function *MemmoveFn, *MemcpyFn, *MemsetFn;
 };
 }  // namespace
@@ -204,6 +211,16 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
       "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
   TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
       "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));
+
+  MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
+    "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+    IRB.getInt8PtrTy(), IntptrTy, NULL));
+  MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
+    "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
+    IntptrTy, NULL));
+  MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
+    "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
+    IntptrTy, NULL));
 }

 bool ThreadSanitizer::doInitialization(Module &M) {
@@ -214,6 +231,7 @@ bool ThreadSanitizer::doInitialization(Module &M) {

   // Always insert a call to __tsan_init into the module's CTORs.
   IRBuilder<> IRB(M.getContext());
+  IntptrTy = IRB.getIntPtrTy(TD);
   Value *TsanInit = M.getOrInsertFunction("__tsan_init",
                                           IRB.getVoidTy(), NULL);
   appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
@@ -313,6 +331,7 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
   SmallVector<Instruction*, 8> AllLoadsAndStores;
   SmallVector<Instruction*, 8> LocalLoadsAndStores;
   SmallVector<Instruction*, 8> AtomicAccesses;
+  SmallVector<Instruction*, 8> MemIntrinCalls;
   bool Res = false;
   bool HasCalls = false;
@@ -329,6 +348,8 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
       else if (isa<ReturnInst>(BI))
         RetVec.push_back(BI);
       else if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
+        if (isa<MemIntrinsic>(BI))
+          MemIntrinCalls.push_back(BI);
         HasCalls = true;
         chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
       }
@@ -352,6 +373,11 @@ bool ThreadSanitizer::runOnFunction(Function &F) {
       Res |= instrumentAtomic(AtomicAccesses[i]);
     }

+  if (ClInstrumentMemIntrinsics)
+    for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
+      Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
+    }
+
   // Instrument function entry/exit points if there were instrumented accesses.
   if ((Res || HasCalls) && ClInstrumentFuncEntryExit) {
     IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
@@ -433,6 +459,32 @@ static ConstantInt *createFailOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
   return IRB->getInt32(v);
 }

+// If a memset intrinsic gets inlined by the code gen, we will miss races on it.
+// So, we either need to ensure the intrinsic is not inlined, or instrument it.
+// We do not instrument memset/memmove/memcpy intrinsics (too complicated),
+// instead we simply replace them with regular function calls, which are then
+// intercepted by the run-time.
+// Since tsan is running after everyone else, the calls should not be
+// replaced back with intrinsics. If that becomes wrong at some point,
+// we will need to call e.g. __tsan_memset to avoid the intrinsics.
+bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
+  IRBuilder<> IRB(I);
+  if (MemSetInst *M = dyn_cast<MemSetInst>(I)) {
+    IRB.CreateCall3(MemsetFn,
+      IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
+      IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
+      IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
+    I->eraseFromParent();
+  } else if (MemTransferInst *M = dyn_cast<MemTransferInst>(I)) {
+    IRB.CreateCall3(isa<MemCpyInst>(M) ? MemcpyFn : MemmoveFn,
+      IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
+      IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
+      IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
+    I->eraseFromParent();
+  }
+  return false;
+}
+
 // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
 // standards. For background see C++11 standard. A slightly older, publically
 // available draft of the standard (not entirely up-to-date, but close enough
--
cgit v1.2.3-18-g5258
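For background, the kind of race the memset/memcpy/memmove replacement keeps visible looks roughly like the sketch below. It is illustrative only; the buffer, its size, and the thread setup are invented, not taken from the patch. The conflicting accesses happen entirely inside memcpy, so the run-time only sees them if the copy stays an out-of-line call that its interceptor can wrap, rather than being expanded inline by code gen.

// Illustrative example only; not part of the patch.
#include <cstring>
#include <thread>

static char Buf[64];

int main() {
  static const char Src[64] = "payload";
  std::thread t([] { std::memcpy(Buf, Src, sizeof(Buf)); });  // copy #1
  std::memcpy(Buf, Src, sizeof(Buf));                         // copy #2, races with #1
  t.join();
  return 0;
}

Because the pass rewrites llvm.memcpy (and memset/memmove) into plain libc calls after the usual optimizations have run, the interceptor in the TSan run-time observes both copies and can report the race; as the comment in the patch notes, __tsan_memset-style callbacks would only become necessary if later passes started turning those calls back into intrinsics.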