tsan: add new mutex annotations
There are several problems with the current annotations (AnnotateRWLockCreate and friends):
- they don't fully support deadlock detection (we need a hook _before_ the mutex lock)
- they don't support insertion of random artificial delays to perturb execution (again, we need a hook _before_ the mutex lock)
- they don't support setting extended mutex attributes like read/write reentrancy (only "linker init" was bolted on)
- they don't support setting mutex attributes if a mutex doesn't have a "constructor" (e.g. static, Java, Go mutexes)
- they don't ignore synchronization inside of lock/unlock operations, which leads to slowdown and false negatives

The new annotations solve all of the above problems. See tsan_interface.h for the interface specification and comments.

Reviewed in https://reviews.llvm.org/D31093

llvm-svn: 298809
This commit is contained in: parent de27a6dcfa, commit 8096a8c86f
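For orientation before the diff, here is a minimal sketch of the intended usage, condensed from the custom_mutex.h test added by this patch. The SpinLock class, its spin loop, and the use of usleep are illustrative assumptions, not part of the change; only the __tsan_mutex_* calls and flags come from the new interface.

#include <sanitizer/tsan_interface.h>
#include <atomic>
#include <unistd.h>

// Hypothetical user code: a trivial spinlock annotated for TSan.
class SpinLock {
 public:
  SpinLock() { __tsan_mutex_create(this, 0); }
  ~SpinLock() { __tsan_mutex_destroy(this, 0); }

  void Lock() {
    __tsan_mutex_pre_lock(this, 0);      // hook runs *before* the lock attempt
    while (locked_.exchange(true, std::memory_order_acquire))
      usleep(100);
    __tsan_mutex_post_lock(this, 0, 0);  // hook runs after acquisition
  }

  bool TryLock() {
    __tsan_mutex_pre_lock(this, __tsan_mutex_try_lock);
    bool ok = !locked_.exchange(true, std::memory_order_acquire);
    // A failed try-lock is reported so it does not create false synchronization.
    __tsan_mutex_post_lock(
        this, __tsan_mutex_try_lock | (ok ? 0 : __tsan_mutex_try_lock_failed), 0);
    return ok;
  }

  void Unlock() {
    __tsan_mutex_pre_unlock(this, 0);
    locked_.store(false, std::memory_order_release);
    __tsan_mutex_post_unlock(this, 0);
  }

 private:
  std::atomic<bool> locked_{false};
};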
@@ -8,6 +8,7 @@ set(SANITIZER_HEADERS
  sanitizer/linux_syscall_hooks.h
  sanitizer/lsan_interface.h
  sanitizer/msan_interface.h
  sanitizer/tsan_interface.h
  sanitizer/tsan_interface_atomic.h)

set(XRAY_HEADERS
compiler-rt/include/sanitizer/tsan_interface.h (new file, 121 lines)

@@ -0,0 +1,121 @@
//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Public interface header for TSan.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_TSAN_INTERFACE_H
#define SANITIZER_TSAN_INTERFACE_H

#include <sanitizer/common_interface_defs.h>

#ifdef __cplusplus
extern "C" {
#endif

// __tsan_release establishes a happens-before relation with a preceding
// __tsan_acquire on the same address.
void __tsan_acquire(void *addr);
void __tsan_release(void *addr);

// Annotations for custom mutexes.
// The annotations allow to get better reports (with sets of locked mutexes),
// detect more types of bugs (e.g. mutex misuses, races between lock/unlock and
// destruction and potential deadlocks) and improve precision and performance
// (by ignoring individual atomic operations in mutex code). However, the
// downside is that annotated mutex code itself is not checked for correctness.

// Mutex creation flags are passed to __tsan_mutex_create annotation.
// If mutex has no constructor and __tsan_mutex_create is not called,
// the flags may be passed to __tsan_mutex_pre_lock/__tsan_mutex_post_lock
// annotations.

// Mutex has static storage duration and no-op constructor and destructor.
// This effectively makes tsan ignore destroy annotation.
const unsigned __tsan_mutex_linker_init      = 1 << 0;
// Mutex is write reentrant.
const unsigned __tsan_mutex_write_reentrant  = 1 << 1;
// Mutex is read reentrant.
const unsigned __tsan_mutex_read_reentrant   = 1 << 2;

// Mutex operation flags:

// Denotes read lock operation.
const unsigned __tsan_mutex_read_lock        = 1 << 3;
// Denotes try lock operation.
const unsigned __tsan_mutex_try_lock         = 1 << 4;
// Denotes that a try lock operation has failed to acquire the mutex.
const unsigned __tsan_mutex_try_lock_failed  = 1 << 5;
// Denotes that the lock operation acquires multiple recursion levels.
// Number of levels is passed in recursion parameter.
// This is useful for annotation of e.g. Java builtin monitors,
// for which wait operation releases all recursive acquisitions of the mutex.
const unsigned __tsan_mutex_recursive_lock   = 1 << 6;
// Denotes that the unlock operation releases all recursion levels.
// Number of released levels is returned and later must be passed to
// the corresponding __tsan_mutex_post_lock annotation.
const unsigned __tsan_mutex_recursive_unlock = 1 << 7;

// Annotate creation of a mutex.
// Supported flags: mutex creation flags.
void __tsan_mutex_create(void *addr, unsigned flags);

// Annotate destruction of a mutex.
// Supported flags: none.
void __tsan_mutex_destroy(void *addr, unsigned flags);

// Annotate start of lock operation.
// Supported flags:
//   - __tsan_mutex_read_lock
//   - __tsan_mutex_try_lock
//   - all mutex creation flags
void __tsan_mutex_pre_lock(void *addr, unsigned flags);

// Annotate end of lock operation.
// Supported flags:
//   - __tsan_mutex_read_lock (must match __tsan_mutex_pre_lock)
//   - __tsan_mutex_try_lock (must match __tsan_mutex_pre_lock)
//   - __tsan_mutex_try_lock_failed
//   - __tsan_mutex_recursive_lock
//   - all mutex creation flags
void __tsan_mutex_post_lock(void *addr, unsigned flags, int recursion);

// Annotate start of unlock operation.
// Supported flags:
//   - __tsan_mutex_read_lock
//   - __tsan_mutex_recursive_unlock
int __tsan_mutex_pre_unlock(void *addr, unsigned flags);

// Annotate end of unlock operation.
// Supported flags:
//   - __tsan_mutex_read_lock (must match __tsan_mutex_pre_unlock)
void __tsan_mutex_post_unlock(void *addr, unsigned flags);

// Annotate start/end of notify/signal/broadcast operation.
// Supported flags: none.
void __tsan_mutex_pre_signal(void *addr, unsigned flags);
void __tsan_mutex_post_signal(void *addr, unsigned flags);

// Annotate start/end of a region of code where lock/unlock/signal operation
// diverts to do something else unrelated to the mutex. This can be used to
// annotate, for example, calls into cooperative scheduler or contention
// profiling code.
// These annotations must be called only from within
// __tsan_mutex_pre/post_lock, __tsan_mutex_pre/post_unlock,
// __tsan_mutex_pre/post_signal regions.
// Supported flags: none.
void __tsan_mutex_pre_divert(void *addr, unsigned flags);
void __tsan_mutex_post_divert(void *addr, unsigned flags);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // SANITIZER_TSAN_INTERFACE_H
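Two cases described in the header above are not exercised by a plain RAII wrapper: a mutex with no "constructor" (creation flags ride on the lock annotations instead of __tsan_mutex_create) and a monitor-style wait that releases and later re-acquires all recursion levels. A hedged sketch follows; the monitor_*_impl helpers and kMonFlags are hypothetical, only the __tsan_* calls and flag names come from this header.

#include <sanitizer/tsan_interface.h>

// Hypothetical runtime hooks of a monitor implementation.
extern void monitor_enter_impl(void *mon);
extern void monitor_release_all_impl(void *mon);
extern void monitor_block_and_reacquire_impl(void *mon);

// The monitor is write-reentrant and has no constructor (e.g. it lives in a
// GC heap), so the creation flag is passed to the lock annotations.
static const unsigned kMonFlags = __tsan_mutex_write_reentrant;

void MonitorEnter(void *mon) {
  __tsan_mutex_pre_lock(mon, kMonFlags);
  monitor_enter_impl(mon);
  __tsan_mutex_post_lock(mon, kMonFlags, 0);
}

void MonitorWait(void *mon) {
  // Wait releases all recursion levels; remember how many for the re-lock.
  int rec = __tsan_mutex_pre_unlock(mon, __tsan_mutex_recursive_unlock);
  monitor_release_all_impl(mon);
  __tsan_mutex_post_unlock(mon, 0);
  monitor_block_and_reacquire_impl(mon);
  __tsan_mutex_pre_lock(mon, kMonFlags);
  __tsan_mutex_post_lock(mon, kMonFlags | __tsan_mutex_recursive_lock, rec);
}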
@@ -24,7 +24,8 @@
// COMMON_INTERCEPTOR_SET_THREAD_NAME
// COMMON_INTERCEPTOR_ON_DLOPEN
// COMMON_INTERCEPTOR_ON_EXIT
// COMMON_INTERCEPTOR_MUTEX_LOCK
// COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
// COMMON_INTERCEPTOR_MUTEX_POST_LOCK
// COMMON_INTERCEPTOR_MUTEX_UNLOCK
// COMMON_INTERCEPTOR_MUTEX_REPAIR
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME

@@ -89,8 +90,12 @@ bool PlatformHasDifferentMemcpyAndMemmove();
#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}
#endif

#ifndef COMMON_INTERCEPTOR_MUTEX_LOCK
#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) {}
#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}
#endif

#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK
#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}
#endif

#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK
@@ -3721,11 +3726,12 @@ INTERCEPTOR(void, _exit, int status) {
INTERCEPTOR(int, pthread_mutex_lock, void *m) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
  COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
  int res = REAL(pthread_mutex_lock)(m);
  if (res == errno_EOWNERDEAD)
    COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
  if (res == 0 || res == errno_EOWNERDEAD)
    COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m);
    COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
  if (res == errno_EINVAL)
    COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
  return res;
@@ -247,13 +247,17 @@ void __tsan_finalizer_goroutine(ThreadState *thr) {
}

void __tsan_mutex_before_lock(ThreadState *thr, uptr addr, uptr write) {
  if (write)
    MutexPreLock(thr, 0, addr);
  else
    MutexPreReadLock(thr, 0, addr);
}

void __tsan_mutex_after_lock(ThreadState *thr, uptr addr, uptr write) {
  if (write)
    MutexLock(thr, 0, addr);
    MutexPostLock(thr, 0, addr);
  else
    MutexReadLock(thr, 0, addr);
    MutexPostReadLock(thr, 0, addr);
}

void __tsan_mutex_before_unlock(ThreadState *thr, uptr addr, uptr write) {
@@ -9,6 +9,16 @@ __tsan_java*
__tsan_unaligned*
__tsan_release
__tsan_acquire
__tsan_mutex_create
__tsan_mutex_destroy
__tsan_mutex_pre_lock
__tsan_mutex_post_lock
__tsan_mutex_pre_unlock
__tsan_mutex_post_unlock
__tsan_mutex_pre_signal
__tsan_mutex_post_signal
__tsan_mutex_pre_divert
__tsan_mutex_post_divert
__ubsan_*
Annotate*
WTFAnnotate*
@@ -21,10 +21,6 @@

namespace __tsan {

Flags *flags() {
  return &ctx->flags;
}

// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
extern "C" const char* __tsan_default_options();

@@ -28,7 +28,6 @@ struct Flags : DDFlags {
  void ParseFromString(const char *str);
};

Flags *flags();
void InitializeFlags(Flags *flags, const char *env);
}  // namespace __tsan
@@ -277,7 +277,7 @@ ScopedInterceptor::~ScopedInterceptor() {

void ScopedInterceptor::EnableIgnores() {
  if (ignoring_) {
    ThreadIgnoreBegin(thr_, pc_);
    ThreadIgnoreBegin(thr_, pc_, false);
    if (in_ignored_lib_) {
      DCHECK(!thr_->in_ignored_lib);
      thr_->in_ignored_lib = true;

@@ -1025,7 +1025,7 @@ static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
  ThreadSignalContext *ctx = SigCtx(arg->thr);
  CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
  atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  MutexLock(arg->thr, arg->pc, (uptr)arg->m);
  MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
  // Undo BlockingCall ctor effects.
  arg->thr->ignore_interceptors--;
  arg->si->~ScopedInterceptor();

@@ -1054,7 +1054,7 @@ static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
        fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
  }
  if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
  MutexLock(thr, pc, (uptr)m);
  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
  return res;
}
@@ -1114,14 +1114,15 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
  int res = REAL(pthread_mutex_init)(m, a);
  if (res == 0) {
    bool recursive = false;
    u32 flagz = 0;
    if (a) {
      int type = 0;
      if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
        recursive = (type == PTHREAD_MUTEX_RECURSIVE
            || type == PTHREAD_MUTEX_RECURSIVE_NP);
        if (type == PTHREAD_MUTEX_RECURSIVE ||
            type == PTHREAD_MUTEX_RECURSIVE_NP)
          flagz |= MutexFlagWriteReentrant;
    }
    MutexCreate(thr, pc, (uptr)m, false, recursive, false);
    MutexCreate(thr, pc, (uptr)m, flagz);
  }
  return res;
}

@@ -1141,7 +1142,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
  if (res == EOWNERDEAD)
    MutexRepair(thr, pc, (uptr)m);
  if (res == 0 || res == EOWNERDEAD)
    MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  return res;
}
@@ -1150,7 +1151,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
  int res = REAL(pthread_mutex_timedlock)(m, abstime);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m);
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

@@ -1161,7 +1162,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
  int res = REAL(pthread_spin_init)(m, pshared);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m, false, false, false);
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

@@ -1177,9 +1178,10 @@ TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {

TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_lock)(m);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m);
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

@@ -1188,7 +1190,7 @@ TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
  int res = REAL(pthread_spin_trylock)(m);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
@@ -1205,7 +1207,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
  int res = REAL(pthread_rwlock_init)(m, a);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m, true, false, false);
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

@@ -1221,9 +1223,10 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {

TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
  MutexPreReadLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_rdlock)(m);
  if (res == 0) {
    MutexReadLock(thr, pc, (uptr)m);
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}

@@ -1232,7 +1235,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
  int res = REAL(pthread_rwlock_tryrdlock)(m);
  if (res == 0) {
    MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
    MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

@@ -1242,7 +1245,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
  int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
  if (res == 0) {
    MutexReadLock(thr, pc, (uptr)m);
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}

@@ -1250,9 +1253,10 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {

TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_wrlock)(m);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m);
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

@@ -1261,7 +1265,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
  int res = REAL(pthread_rwlock_trywrlock)(m);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

@@ -1271,7 +1275,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
  int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
  if (res == 0) {
    MutexLock(thr, pc, (uptr)m);
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
@@ -2251,8 +2255,12 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
  OnExit(((TsanInterceptorContext *) ctx)->thr)

#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \
  MutexLock(((TsanInterceptorContext *)ctx)->thr, \
#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
  MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
            ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
  MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
            ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
@@ -31,11 +31,10 @@ namespace __tsan {

class ScopedAnnotation {
 public:
  ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l,
                   uptr pc)
  ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l);
    DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
  }

  ~ScopedAnnotation() {

@@ -46,18 +45,20 @@ class ScopedAnnotation {
  ThreadState *const thr_;
};

#define SCOPED_ANNOTATION(typ) \
#define SCOPED_ANNOTATION_RET(typ, ret) \
    if (!flags()->enable_annotations) \
      return; \
      return ret; \
    ThreadState *thr = cur_thread(); \
    const uptr caller_pc = (uptr)__builtin_return_address(0); \
    StatInc(thr, StatAnnotation); \
    StatInc(thr, Stat##typ); \
    ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
    ScopedAnnotation sa(thr, __func__, caller_pc); \
    const uptr pc = StackTrace::GetCurrentPc(); \
    (void)pc; \
/**/

#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )

static const int kMaxDescLen = 128;

struct ExpectRace {
@@ -252,12 +253,12 @@ void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,

void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreate);
  MutexCreate(thr, pc, m, true, true, false);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
  SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
  MutexCreate(thr, pc, m, true, true, true);
  MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {

@@ -269,9 +270,9 @@ void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
                                                uptr is_w) {
  SCOPED_ANNOTATION(AnnotateRWLockAcquired);
  if (is_w)
    MutexLock(thr, pc, m);
    MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
  else
    MutexReadLock(thr, pc, m);
    MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
}

void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
@@ -458,4 +459,95 @@ void INTERFACE_ATTRIBUTE
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}

// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
INTERFACE_ATTRIBUTE
void __tsan_mutex_create(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_create);
  MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_destroy(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_destroy);
  MutexDestroy(thr, pc, (uptr)m);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
  if (!(flagz & MutexFlagTryLock)) {
    if (flagz & MutexFlagReadLock)
      MutexPreReadLock(thr, pc, (uptr)m);
    else
      MutexPreLock(thr, pc, (uptr)m);
  }
  ThreadIgnoreBegin(thr, pc, false);
  ThreadIgnoreSyncBegin(thr, pc, false);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
  SCOPED_ANNOTATION(__tsan_mutex_post_lock);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
  if (!(flagz & MutexFlagTryLockFailed)) {
    if (flagz & MutexFlagReadLock)
      MutexPostReadLock(thr, pc, (uptr)m, flagz);
    else
      MutexPostLock(thr, pc, (uptr)m, flagz, rec);
  }
}

INTERFACE_ATTRIBUTE
int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
  int ret = 0;
  if (flagz & MutexFlagReadLock) {
    CHECK(!(flagz & MutexFlagRecursiveUnlock));
    MutexReadUnlock(thr, pc, (uptr)m);
  } else {
    ret = MutexUnlock(thr, pc, (uptr)m, flagz);
  }
  ThreadIgnoreBegin(thr, pc, false);
  ThreadIgnoreSyncBegin(thr, pc, false);
  return ret;
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
  ThreadIgnoreBegin(thr, pc, false);
  ThreadIgnoreSyncBegin(thr, pc, false);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_signal);
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
  // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
  ThreadIgnoreSyncEnd(thr, pc);
  ThreadIgnoreEnd(thr, pc);
}

INTERFACE_ATTRIBUTE
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
  SCOPED_ANNOTATION(__tsan_mutex_post_divert);
  ThreadIgnoreBegin(thr, pc, false);
  ThreadIgnoreSyncBegin(thr, pc, false);
}
}  // extern "C"
@@ -468,12 +468,14 @@ static morder convert_morder(morder mo) {
}

#define SCOPED_ATOMIC(func, ...) \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_sync || thr->ignore_interceptors) { \
      ProcessPendingSignals(thr); \
      return NoTsanAtomic##func(__VA_ARGS__); \
    } \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = convert_morder(mo); \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
@@ -180,8 +180,8 @@ void __tsan_java_mutex_lock(jptr addr) {
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexCreate(thr, pc, addr, true, true, true);
  MutexLock(thr, pc, addr);
  MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
      MutexFlagDoPreLockOnPostLock);
}

void __tsan_java_mutex_unlock(jptr addr) {

@@ -201,8 +201,8 @@ void __tsan_java_mutex_read_lock(jptr addr) {
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  MutexCreate(thr, pc, addr, true, true, true);
  MutexReadLock(thr, pc, addr);
  MutexPostReadLock(thr, pc, addr, MutexFlagLinkerInit |
      MutexFlagWriteReentrant | MutexFlagDoPreLockOnPostLock);
}

void __tsan_java_mutex_read_unlock(jptr addr) {

@@ -223,8 +223,8 @@ void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
  CHECK_GT(rec, 0);

  MutexCreate(thr, pc, addr, true, true, true);
  MutexLock(thr, pc, addr, rec);
  MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
      MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock, rec);
}

int __tsan_java_mutex_unlock_rec(jptr addr) {

@@ -234,7 +234,7 @@ int __tsan_java_mutex_unlock_rec(jptr addr) {
  CHECK_GE(addr, jctx->heap_begin);
  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);

  return MutexUnlock(thr, pc, addr, true);
  return MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock);
}

void __tsan_java_acquire(jptr addr) {
@@ -980,21 +980,21 @@ void FuncExit(ThreadState *thr) {
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (!ctx->after_multithreaded_fork)
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO

@@ -1011,20 +1011,20 @@ uptr __tsan_testonly_shadow_stack_current_size() {
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (!ctx->after_multithreaded_fork)
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
@@ -546,6 +546,10 @@ struct Context {

extern Context *ctx;  // The one and the only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO

@@ -707,9 +711,9 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);

@@ -731,13 +735,16 @@ void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
                   int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
@@ -62,20 +62,17 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
  OutputReport(thr, rep);
}

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init) {
  DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  if (!linker_init && IsAppMem(addr)) {
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->is_rw = rw;
  s->is_recursive = recursive;
  s->is_linker_init = linker_init;
  s->SetFlags(flagz & MutexCreationFlagMask);
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();

@@ -87,7 +84,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if (s->is_linker_init) {
  if (s->IsFlagSet(MutexFlagLinkerInit)) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;

@@ -100,8 +97,8 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->is_broken) {
    s->is_broken = true;
      && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  u64 mid = s->GetId();

@@ -141,12 +138,33 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
  DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
  CHECK_GT(rec, 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}

void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
@@ -156,38 +174,43 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->is_broken) {
    s->is_broken = true;
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_double_lock = true;
  }
  if (s->recursion == 0) {
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->is_recursive) {
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  s->recursion += rec;
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (!try_lock)
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
@@ -196,12 +219,12 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  } else {
    rec = all ? s->recursion : 1;
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
@@ -229,36 +252,53 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
  return rec;
}

void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
  DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (!trylock)
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (common_flags()->detect_deadlocks) {
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
@@ -274,8 +314,8 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->is_broken) {
      s->is_broken = true;
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }

@@ -323,8 +363,8 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->is_broken) {
    s->is_broken = true;
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
@@ -153,6 +153,16 @@ void StatOutput(u64 *stat) {
  name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange ";
  name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange ";
  name[StatAnnotateThreadName] = " ThreadName ";
  name[Stat__tsan_mutex_create] = " __tsan_mutex_create ";
  name[Stat__tsan_mutex_destroy] = " __tsan_mutex_destroy ";
  name[Stat__tsan_mutex_pre_lock] = " __tsan_mutex_pre_lock ";
  name[Stat__tsan_mutex_post_lock] = " __tsan_mutex_post_lock ";
  name[Stat__tsan_mutex_pre_unlock] = " __tsan_mutex_pre_unlock ";
  name[Stat__tsan_mutex_post_unlock] = " __tsan_mutex_post_unlock ";
  name[Stat__tsan_mutex_pre_signal] = " __tsan_mutex_pre_signal ";
  name[Stat__tsan_mutex_post_signal] = " __tsan_mutex_post_signal ";
  name[Stat__tsan_mutex_pre_divert] = " __tsan_mutex_pre_divert ";
  name[Stat__tsan_mutex_post_divert] = " __tsan_mutex_post_divert ";

  name[StatMtxTotal] = "Contentionz ";
  name[StatMtxTrace] = " Trace ";

@@ -157,6 +157,16 @@ enum StatType {
  StatAnnotatePublishMemoryRange,
  StatAnnotateUnpublishMemoryRange,
  StatAnnotateThreadName,
  Stat__tsan_mutex_create,
  Stat__tsan_mutex_destroy,
  Stat__tsan_mutex_pre_lock,
  Stat__tsan_mutex_post_lock,
  Stat__tsan_mutex_pre_unlock,
  Stat__tsan_mutex_post_unlock,
  Stat__tsan_mutex_pre_signal,
  Stat__tsan_mutex_post_signal,
  Stat__tsan_mutex_pre_divert,
  Stat__tsan_mutex_post_divert,

  // Internal mutex contentionz.
  StatMtxTotal,
@@ -42,10 +42,7 @@ void SyncVar::Reset(Processor *proc) {
  owner_tid = kInvalidTid;
  last_lock = 0;
  recursion = 0;
  is_rw = 0;
  is_recursive = 0;
  is_broken = 0;
  is_linker_init = 0;
  atomic_store_relaxed(&flags, 0);

  if (proc == 0) {
    CHECK_EQ(clock.size(), 0);
@@ -23,6 +23,29 @@

namespace __tsan {

// These need to match __tsan_mutex_* flags defined in tsan_interface.h.
// See documentation there as well.
enum MutexFlags {
  MutexFlagLinkerInit = 1 << 0,       // __tsan_mutex_linker_init
  MutexFlagWriteReentrant = 1 << 1,   // __tsan_mutex_write_reentrant
  MutexFlagReadReentrant = 1 << 2,    // __tsan_mutex_read_reentrant
  MutexFlagReadLock = 1 << 3,         // __tsan_mutex_read_lock
  MutexFlagTryLock = 1 << 4,          // __tsan_mutex_try_lock
  MutexFlagTryLockFailed = 1 << 5,    // __tsan_mutex_try_lock_failed
  MutexFlagRecursiveLock = 1 << 6,    // __tsan_mutex_recursive_lock
  MutexFlagRecursiveUnlock = 1 << 7,  // __tsan_mutex_recursive_unlock

  // The following flags are runtime private.
  // Mutex API misuse was detected, so don't report any more.
  MutexFlagBroken = 1 << 30,
  // We did not intercept pre lock event, so handle it on post lock.
  MutexFlagDoPreLockOnPostLock = 1 << 29,
  // Must list all mutex creation flags.
  MutexCreationFlagMask = MutexFlagLinkerInit |
      MutexFlagWriteReentrant |
      MutexFlagReadReentrant,
};

struct SyncVar {
  SyncVar();

@@ -35,10 +58,7 @@ struct SyncVar {
  int owner_tid;  // Set only by exclusive owners.
  u64 last_lock;
  int recursion;
  bool is_rw;
  bool is_recursive;
  bool is_broken;
  bool is_linker_init;
  atomic_uint32_t flags;
  u32 next;  // in MetaMap
  DDMutex dd;
  SyncClock read_clock;  // Used for rw mutexes only.

@@ -61,6 +81,26 @@ struct SyncVar {
    *uid = id >> 48;
    return (uptr)GetLsb(id, 48);
  }
  bool IsFlagSet(u32 f) const {
    return atomic_load_relaxed(&flags) & f;
  }
  void SetFlags(u32 f) {
    atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);
  }

  void UpdateFlags(u32 flagz) {
    // Filter out operation flags.
    if (!(flagz & MutexCreationFlagMask))
      return;
    u32 current = atomic_load_relaxed(&flags);
    if (current & MutexCreationFlagMask)
      return;
    // Note: this can be called from MutexPostReadLock which holds only read
    // lock on the SyncVar.
    atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask));
  }
};

/* MetaMap allows to map arbitrary user pointers onto various descriptors.
compiler-rt/test/tsan/custom_mutex.h (new file, 91 lines)

@@ -0,0 +1,91 @@
#include "test.h"
#include <atomic>
#include <vector>
#include <sanitizer/tsan_interface.h>

// A very primitive mutex annotated with tsan annotations.
class Mutex {
 public:
  Mutex(bool prof = true)
      : prof_(prof)
      , locked_(false)
      , seq_(0) {
    __tsan_mutex_create(this, 0);
  }

  ~Mutex() {
    __tsan_mutex_destroy(this, 0);
  }

  void Lock() {
    __tsan_mutex_pre_lock(this, 0);
    LockImpl();
    __tsan_mutex_post_lock(this, 0, 0);
  }

  bool TryLock() {
    __tsan_mutex_pre_lock(this, __tsan_mutex_try_lock);
    bool ok = TryLockImpl();
    __tsan_mutex_post_lock(this, __tsan_mutex_try_lock |
        (ok ? 0 : __tsan_mutex_try_lock_failed), 0);
    return ok;
  }

  void Unlock() {
    __tsan_mutex_pre_unlock(this, 0);
    UnlockImpl();
    __tsan_mutex_post_unlock(this, 0);
  }

  void Wait() {
    for (int seq = seq_; seq == seq_;) {
      Unlock();
      usleep(100);
      Lock();
    }
  }

  void Broadcast() {
    __tsan_mutex_pre_signal(this, 0);
    LockImpl(false);
    seq_++;
    UnlockImpl();
    __tsan_mutex_post_signal(this, 0);
  }

 private:
  const bool prof_;
  std::atomic<bool> locked_;
  int seq_;

  // This models mutex profiling subsystem.
  static Mutex prof_mu_;
  static int prof_data_;

  void LockImpl(bool prof = true) {
    while (!TryLockImpl())
      usleep(100);
    if (prof && prof_)
      Prof();
  }

  bool TryLockImpl() {
    return !locked_.exchange(true);
  }

  void UnlockImpl() {
    locked_.store(false);
  }

  void Prof() {
    // This happens inside of mutex lock annotations.
    __tsan_mutex_pre_divert(this, 0);
    prof_mu_.Lock();
    prof_data_++;
    prof_mu_.Unlock();
    __tsan_mutex_post_divert(this, 0);
  }
};

Mutex Mutex::prof_mu_(false);
int Mutex::prof_data_;
compiler-rt/test/tsan/custom_mutex0.cc (new file, 31 lines)

@@ -0,0 +1,31 @@
// RUN: %clangxx_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s
#include "custom_mutex.h"

// Test that custom annoations provide normal mutex synchronization
// (no race reports for properly protected critical sections).

Mutex mu;
long data;

void *thr(void *arg) {
  barrier_wait(&barrier);
  mu.Lock();
  data++;
  mu.Unlock();
  return 0;
}

int main() {
  barrier_init(&barrier, 2);
  pthread_t th;
  pthread_create(&th, 0, thr, 0);
  barrier_wait(&barrier);
  mu.Lock();
  data++;
  mu.Unlock();
  pthread_join(th, 0);
  fprintf(stderr, "DONE\n");
  return 0;
}

// CHECK: DONE
compiler-rt/test/tsan/custom_mutex1.cc (new file, 39 lines)

@@ -0,0 +1,39 @@
// RUN: %clangxx_tsan -O1 %s -o %t && %deflake %run %t 2>&1 | FileCheck %s
#include "custom_mutex.h"

// Test that failed TryLock does not induce parasitic synchronization.

Mutex mu;
long data;

void *thr(void *arg) {
  mu.Lock();
  data++;
  mu.Unlock();
  mu.Lock();
  barrier_wait(&barrier);
  barrier_wait(&barrier);
  mu.Unlock();
  return 0;
}

int main() {
  barrier_init(&barrier, 2);
  pthread_t th;
  pthread_create(&th, 0, thr, 0);
  barrier_wait(&barrier);
  if (mu.TryLock()) {
    fprintf(stderr, "TryLock succeeded, should not\n");
    exit(0);
  }
  data++;
  barrier_wait(&barrier);
  pthread_join(th, 0);
  fprintf(stderr, "DONE\n");
  return 0;
}

// CHECK: ThreadSanitizer: data race
// CHECK-NEXT: Write of size 8 at {{.*}} by main thread:
// CHECK-NEXT:   #0 main {{.*}}custom_mutex1.cc:29
// CHECK: DONE
compiler-rt/test/tsan/custom_mutex2.cc (new file, 34 lines)

@@ -0,0 +1,34 @@
// RUN: %clangxx_tsan -O1 %s -o %t && %deflake %run %t 2>&1 | FileCheck %s
#include "custom_mutex.h"

// Test that Broadcast does not induce parasitic synchronization.

Mutex mu;
long data;

void *thr(void *arg) {
  barrier_wait(&barrier);
  mu.Lock();
  data++;
  mu.Unlock();
  data++;
  mu.Broadcast();
  return 0;
}

int main() {
  barrier_init(&barrier, 2);
  pthread_t th;
  pthread_create(&th, 0, thr, 0);
  mu.Lock();
  barrier_wait(&barrier);
  while (data == 0)
    mu.Wait();
  mu.Unlock();
  pthread_join(th, 0);
  fprintf(stderr, "DONE\n");
  return 0;
}

// CHECK: ThreadSanitizer: data race
// CHECK: DONE