tsan: add concurrent hashmap for standalone deadlock detector

llvm-svn: 202826
Dmitry Vyukov 2014-03-04 11:39:56 +00:00
parent d92a12302b
commit 54a0303fa8
4 changed files with 321 additions and 54 deletions

sanitizer_addrhashmap.h (new file)

@@ -0,0 +1,207 @@
//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H
#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
namespace __sanitizer {
// Concurrent uptr->T hashmap.
// T must be a POD type; kSize is preferably a prime but can be any number.
// The hashmap is fixed size; it crashes on overflow.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
// Map::Handle h(&m, addr);
// use h.operator->() to access the data
// if h.created() then the element was just created, and the current thread
// has exclusive access to it
// otherwise the current thread has only read access to the data
// }
// {
// Map::Handle h(&m, addr, true);
// this will remove the data from the map in Handle dtor
// the current thread has exclusive access to the data
// if !h.exists() then the element never existed
// }
template<typename T, uptr kSize>
class AddrHashMap {
private:
struct Cell {
RWMutex mtx;
atomic_uintptr_t addr;
T val;
};
public:
AddrHashMap();
class Handle {
public:
Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove = false);
~Handle();
T *operator -> ();
bool created() const;
bool exists() const;
private:
AddrHashMap<T, kSize> *map_;
Cell *cell_;
uptr addr_;
bool created_;
bool remove_;
bool write_;
};
private:
friend class Handle;
Cell *table_;
static const uptr kRemoved = 1;
Cell *acquire(uptr addr, bool remove, bool *created, bool *write);
void release(uptr addr, bool remove, bool write, Cell *c);
uptr hash(uptr addr);
};
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
bool remove) {
map_ = map;
addr_ = addr;
remove_ = remove;
cell_ = map_->acquire(addr_, remove_, &created_, &write_);
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
map_->release(addr_, remove_, write_, cell_);
}
template<typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator -> () {
return &cell_->val;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
return created_;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
return cell_ != 0;
}
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
table_ = (Cell*)MmapOrDie(kSize * sizeof(Cell), "AddrHashMap");
}
template<typename T, uptr kSize>
typename AddrHashMap<T, kSize>::Cell *AddrHashMap<T, kSize>::acquire(uptr addr,
bool remove, bool *created, bool *write) {
// When we access the element associated with addr,
// we lock its home cell (the cell associated with hash(addr)).
// If the element was just created or is going to be removed,
// we lock the cell in write mode. Otherwise we lock in read mode.
// The locking protects the object lifetime (it's not removed while
// somebody else accesses it). And also it helps to resolve concurrent
// inserts.
// Note that the home cell is not necessarily the cell where the element is
// stored.
*write = false;
*created = false;
uptr h0 = hash(addr);
Cell *c0 = &table_[h0];
// First try to find an existing element under read lock.
if (!remove) {
c0->mtx.ReadLock();
uptr h = h0;
for (;;) {
Cell *c = &table_[h];
uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
if (addr1 == 0) // empty cell denotes end of the cell chain for the elem
break;
if (addr1 == addr) // ok, found it
return c;
h++;
if (h == kSize)
h = 0;
CHECK_NE(h, h0); // made the full cycle
}
c0->mtx.ReadUnlock();
}
// Now try to create it under write lock.
*write = true;
c0->mtx.Lock();
uptr h = h0;
for (;;) {
Cell *c = &table_[h];
uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
if (addr1 == addr) // another thread has inserted it ahead of us
return c;
if (remove && addr1 == 0) { // the element has not existed before
c0->mtx.Unlock();
return 0;
}
if (!remove && (addr1 == 0 || addr1 == kRemoved) &&
atomic_compare_exchange_strong(&c->addr, &addr1, addr,
memory_order_acq_rel)) {
// we've created the element
*created = true;
return c;
}
h++;
if (h == kSize)
h = 0;
CHECK_NE(h, h0); // made the full cycle
}
}
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(uptr addr, bool remove, bool write,
Cell *c) {
if (c == 0)
return;
// if we are going to remove, we must hold write lock
CHECK_EQ(!remove || write, true);
CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), addr);
// denote that the cell is empty now
if (remove)
atomic_store(&c->addr, kRemoved, memory_order_release);
// unlock the home cell
uptr h0 = hash(addr);
Cell *c0 = &table_[h0];
if (write)
c0->mtx.Unlock();
else
c0->mtx.ReadUnlock();
}
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::hash(uptr addr) {
addr += addr << 10;
addr ^= addr >> 6;
return addr % kSize;
}
} // namespace __sanitizer
#endif // SANITIZER_ADDRHASHMAP_H
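
To make the usage contract in the header comment concrete, here is a minimal client sketch. The MutexInfo payload, the table size, the InitMutexMap/OnLock/OnDestroy functions and the global pointer are all hypothetical, not part of this commit; the pattern mirrors the dd_rtl.cc changes further down.

#include "sanitizer_addrhashmap.h"
#include "sanitizer_placement_new.h"

using namespace __sanitizer;

// Hypothetical POD payload; the map never runs constructors or destructors
// for T, it only hands out pointers into mmap-ed, zero-initialized cells.
struct MutexInfo {
  u32 creation_stack;
};

typedef AddrHashMap<MutexInfo, 31051> MutexMap;  // any size works, prime preferred
static MutexMap *mutex_map;

void InitMutexMap() {
  // Same trick as dd_rtl.cc below: construct into static storage, no global ctor.
  static u64 map_mem[sizeof(MutexMap) / sizeof(u64) + 1];
  mutex_map = new(map_mem) MutexMap();
}

void OnLock(uptr addr, u32 stack) {
  MutexMap::Handle h(mutex_map, addr);
  if (h.created()) {
    // Just inserted: this thread holds the home cell in write mode and
    // may initialize the element.
    h->creation_stack = stack;
  }
  // Otherwise only read access is guaranteed, but the element cannot be
  // removed while the handle (and hence the cell lock) is alive.
  u32 stk = h->creation_stack;
  (void)stk;
}

void OnDestroy(uptr addr) {
  // remove = true: the element is erased in the Handle destructor.
  MutexMap::Handle h(mutex_map, addr, true);
  if (!h.exists())
    return;  // this address was never inserted
  // Exclusive access to *h here, until the handle goes out of scope.
}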

sanitizer_mutex.h

@@ -83,6 +83,88 @@ class BlockingMutex {
  uptr owner_;  // for debugging
};
// Reader-writer spin mutex.
class RWMutex {
public:
RWMutex() {
atomic_store(&state_, kUnlocked, memory_order_relaxed);
}
~RWMutex() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
void Lock() {
uptr cmp = kUnlocked;
if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
memory_order_acquire))
return;
LockSlow();
}
void Unlock() {
uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
DCHECK_NE(prev & kWriteLock, 0);
(void)prev;
}
void ReadLock() {
uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
if ((prev & kWriteLock) == 0)
return;
ReadLockSlow();
}
void ReadUnlock() {
uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
(void)prev;
}
void CheckLocked() {
CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
private:
atomic_uintptr_t state_;
enum {
kUnlocked = 0,
kWriteLock = 1,
kReadLock = 2
};
void NOINLINE LockSlow() {
for (int i = 0;; i++) {
if (i < 10)
proc_yield(10);
else
internal_sched_yield();
uptr cmp = atomic_load(&state_, memory_order_relaxed);
if (cmp == kUnlocked &&
atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
memory_order_acquire))
return;
}
}
void NOINLINE ReadLockSlow() {
for (int i = 0;; i++) {
if (i < 10)
proc_yield(10);
else
internal_sched_yield();
uptr prev = atomic_load(&state_, memory_order_acquire);
if ((prev & kWriteLock) == 0)
return;
}
}
RWMutex(const RWMutex&);
void operator = (const RWMutex&);
};
template<typename MutexType>
class GenericScopedLock {
 public:
@@ -123,6 +205,8 @@ class GenericScopedReadLock {
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;

}  // namespace __sanitizer
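
The new RWMutex packs both modes into a single atomic word: a writer owns bit 0 (kWriteLock) and every reader adds kReadLock = 2, so, for example, three concurrent readers make the state 6, and readers spin only while the write bit is set. A small sketch of how the accompanying RWMutexLock / RWMutexReadLock typedefs might be used; Registry and its fields are made up for illustration.

namespace __sanitizer {

struct Registry {
  RWMutex mtx;
  uptr size;
  uptr elems[128];
};

uptr Lookup(Registry *r, uptr i) {
  RWMutexReadLock l(&r->mtx);  // shared lock: lookups run in parallel
  return i < r->size ? r->elems[i] : 0;
}

void Append(Registry *r, uptr v) {
  RWMutexLock l(&r->mtx);      // exclusive lock: one writer at a time
  if (r->size < 128)
    r->elems[r->size++] = v;
}

}  // namespace __sanitizer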

dd_rtl.cc

@@ -9,16 +9,19 @@
#include "dd_rtl.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __dsan {

-static Context ctx0;
-static Context * const ctx = &ctx0;
+static Context *ctx;

void Initialize() {
+  static u64 ctx_mem[sizeof(Context) / sizeof(u64) + 1];
+  ctx = new(ctx_mem) Context();
  InitializeInterceptors();
  //common_flags()->allow_addr2line = true;
  common_flags()->symbolize = true;
@@ -37,9 +40,9 @@ void ThreadDestroy(Thread *thr) {
static u32 CurrentStackTrace(Thread *thr) {
  StackTrace trace;
-  thr->in_symbolizer = true;
+  thr->ignore_interceptors = true;
  trace.Unwind(1000, 0, 0, 0, 0, 0, false);
-  thr->in_symbolizer = false;
+  thr->ignore_interceptors = false;
  const uptr skip = 4;
  if (trace.size <= skip)
    return 0;
@@ -49,39 +52,9 @@ static u32 CurrentStackTrace(Thread *thr) {
static void PrintStackTrace(Thread *thr, u32 stk) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(stk, &size);
-  thr->in_symbolizer = true;
+  thr->ignore_interceptors = true;
  StackTrace::PrintStack(trace, size);
-  thr->in_symbolizer = false;
+  thr->ignore_interceptors = false;
}

-static Mutex *FindMutex(Thread *thr, uptr m) {
-  SpinMutexLock l(&ctx->mutex_mtx);
-  for (Mutex *mtx = ctx->mutex_list; mtx; mtx = mtx->link) {
-    if (mtx->addr == m)
-      return mtx;
-  }
-  Mutex *mtx = (Mutex*)InternalAlloc(sizeof(*mtx));
-  internal_memset(mtx, 0, sizeof(*mtx));
-  mtx->addr = m;
-  ctx->dd->MutexInit(&mtx->dd, CurrentStackTrace(thr), ctx->mutex_seq++);
-  mtx->link = ctx->mutex_list;
-  ctx->mutex_list = mtx;
-  return mtx;
-}
-
-static Mutex *FindMutexAndRemove(uptr m) {
-  SpinMutexLock l(&ctx->mutex_mtx);
-  Mutex **prev = &ctx->mutex_list;
-  for (;;) {
-    Mutex *mtx = *prev;
-    if (mtx == 0)
-      return 0;
-    if (mtx->addr == m) {
-      *prev = mtx->link;
-      return mtx;
-    }
-    prev = &mtx->link;
-  }
-}

static void ReportDeadlock(Thread *thr, DDReport *rep) {
@@ -96,30 +69,34 @@ static void ReportDeadlock(Thread *thr, DDReport *rep) {
}

void MutexLock(Thread *thr, uptr m, bool writelock, bool trylock) {
-  if (thr->in_symbolizer)
+  if (thr->ignore_interceptors)
    return;
-  Mutex *mtx = FindMutex(thr, m);
-  DDReport *rep = ctx->dd->MutexLock(thr->dd_pt, thr->dd_lt, &mtx->dd,
-      writelock, trylock);
+  DDReport *rep = 0;
+  {
+    MutexHashMap::Handle h(&ctx->mutex_map, m);
+    if (h.created())
+      ctx->dd->MutexInit(&h->dd, CurrentStackTrace(thr), m);
+    rep = ctx->dd->MutexLock(thr->dd_pt, thr->dd_lt, &h->dd,
+        writelock, trylock);
+  }
  if (rep)
    ReportDeadlock(thr, rep);
}

void MutexUnlock(Thread *thr, uptr m, bool writelock) {
-  if (thr->in_symbolizer)
+  if (thr->ignore_interceptors)
    return;
-  Mutex *mtx = FindMutex(thr, m);
-  ctx->dd->MutexUnlock(thr->dd_pt, thr->dd_lt, &mtx->dd, writelock);
+  MutexHashMap::Handle h(&ctx->mutex_map, m);
+  ctx->dd->MutexUnlock(thr->dd_pt, thr->dd_lt, &h->dd, writelock);
}

void MutexDestroy(Thread *thr, uptr m) {
-  if (thr->in_symbolizer)
+  if (thr->ignore_interceptors)
    return;
-  Mutex *mtx = FindMutexAndRemove(m);
-  if (mtx == 0)
+  MutexHashMap::Handle h(&ctx->mutex_map, m, true);
+  if (!h.exists())
    return;
-  ctx->dd->MutexDestroy(thr->dd_pt, thr->dd_lt, &mtx->dd);
-  InternalFree(mtx);
+  ctx->dd->MutexDestroy(thr->dd_pt, thr->dd_lt, &h->dd);
}

}  // namespace __dsan
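
One detail of the rewritten MutexLock is worth calling out: the map Handle lives in an inner scope, so the per-cell lock taken by the hash map is released before ReportDeadlock unwinds and prints stacks, and the third argument to MutexInit, previously a running ctx->mutex_seq counter, is now the mutex address m itself. Reduced to its skeleton, the new lock path looks like this (same identifiers as in the diff above):

void MutexLock(Thread *thr, uptr m, bool writelock, bool trylock) {
  if (thr->ignore_interceptors)
    return;
  DDReport *rep = 0;
  {
    // Handle ctor locks the home cell; created() means this thread just
    // inserted the element and may initialize it.
    MutexHashMap::Handle h(&ctx->mutex_map, m);
    if (h.created())
      ctx->dd->MutexInit(&h->dd, CurrentStackTrace(thr), m);
    rep = ctx->dd->MutexLock(thr->dd_pt, thr->dd_lt, &h->dd, writelock, trylock);
  }  // Handle dtor releases the cell here, before any reporting.
  if (rep)
    ReportDeadlock(thr, rep);
}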

dd_rtl.h

@@ -12,13 +12,12 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_addrhashmap.h"
#include "sanitizer_common/sanitizer_mutex.h"

namespace __dsan {

struct Mutex {
-  Mutex *link;
-  uptr addr;
  DDMutex dd;
};

@@ -26,15 +25,15 @@ struct Thread {
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;
-  bool in_symbolizer;
+  bool ignore_interceptors;
};

+typedef AddrHashMap<Mutex, 1000003> MutexHashMap;

struct Context {
  DDetector *dd;
-  SpinMutex mutex_mtx;
-  Mutex *mutex_list;
-  u64 mutex_seq;
+  MutexHashMap mutex_map;
};

void InitializeInterceptors();
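
For reference, the per-entry storage for this instantiation follows directly from sanitizer_addrhashmap.h above; roughly as sketched below (CellSketch is just an illustrative name for AddrHashMap's private Cell, and the field sizes depend on DDMutex, which is defined elsewhere):

// What one AddrHashMap<Mutex, 1000003> table cell holds:
struct CellSketch {
  RWMutex          mtx;   // per-cell reader-writer spin lock
  atomic_uintptr_t addr;  // mutex address; 0 = empty, 1 = kRemoved
  Mutex            val;   // payload: just the DDMutex detector state
};
// The whole table (1000003 cells) is mmap-ed once in the AddrHashMap ctor,
// and the map crashes on overflow, as noted in its header comment.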