[sanitizer] in bitvector-based deadlock detector split onLock into onLockBefore and onLockAfter hooks
llvm-svn: 203796
commit 65dbf46950
parent 9fa9a49853
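The point of the split: onLockBefore() consults the lock-order graph before the mutex is acquired, so a cycle can be reported while every thread is still running, and onLockAfter() records the new edges once the acquisition has succeeded. A minimal detector-level sketch of the intended call order (Mutex and ReportDeadlockCycle are illustrative stand-ins, not part of this commit):

// Sketch only: driving the split hooks around a real acquisition.
// DeadlockDetector/DeadlockDetectorTLS are the classes changed below;
// Mutex and ReportDeadlockCycle are hypothetical stand-ins.
template <class BV>
void LockWithDetection(DeadlockDetector<BV> *d, DeadlockDetectorTLS<BV> *dtls,
                       Mutex *m, uptr node) {
  if (d->onLockBefore(dtls, node))  // would this acquisition close a cycle?
    ReportDeadlockCycle(node);      // report while all threads still run
  m->Lock();                        // the acquisition itself
  d->onLockAfter(dtls, node);       // record edges {held locks} -> node
}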
compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h

@@ -67,7 +67,6 @@ class DeadlockDetectorTLS {
   }
 
   void removeLock(uptr lock_id) {
-    // Printf("remLock: %zx %zx\n", lock_id, current_epoch);
     if (n_recursive_locks) {
       for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
         if (recursive_locks[i] == lock_id) {
@@ -77,6 +76,7 @@ class DeadlockDetectorTLS {
         }
       }
     }
+    // Printf("remLock: %zx %zx\n", lock_id, epoch_);
     CHECK(bv_.clearBit(lock_id));
   }
 
@@ -155,14 +155,30 @@ class DeadlockDetector {
     dtls->ensureCurrentEpoch(current_epoch_);
   }
 
-  // Handles the lock event, returns true if there is a cycle.
-  // FIXME: handle RW locks, recursive locks, etc.
-  bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+  // Returns true if there is a cycle in the graph after this lock event.
+  // Ideally should be called before the lock is acquired so that we can
+  // report a deadlock before a real deadlock happens.
+  bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
     ensureCurrentEpoch(dtls);
     uptr cur_idx = nodeToIndex(cur_node);
+    return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
+  }
+
+  // Returns true if a new edge has been added to the graph.
+  // Should be called after the lock is acquired.
+  bool onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+    ensureCurrentEpoch(dtls);
+    uptr cur_idx = nodeToIndex(cur_node);
-    bool is_reachable = g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
     g_.addEdges(dtls->getLocks(current_epoch_), cur_idx);
-    return dtls->addLock(cur_idx, current_epoch_) && is_reachable;
+    return dtls->addLock(cur_idx, current_epoch_);
+  }
+
+  // Test-only function. Handles the before/after lock events,
+  // returns true if there is a cycle.
+  bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+    ensureCurrentEpoch(dtls);
+    bool is_reachable = onLockBefore(dtls, cur_node);
+    return onLockAfter(dtls, cur_node) && is_reachable;
+  }
 
   // Handles the try_lock event, returns false.
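For reference, the cycle onLockBefore() detects is the classic lock-order inversion. A sketch in the style of the RunBasicTest() changes further down (illustration only, not part of the commit; it reuses one DeadlockDetectorTLS for both simulated threads for brevity):

// Illustration: A-then-B in one thread, B-then-A in another,
// caught before the second acquisition would actually block.
template <class BV>
void RunInversionExample() {
  DeadlockDetector<BV> d;
  DeadlockDetectorTLS<BV> dtls;
  d.clear();
  dtls.clear();
  uptr a = d.newNode(0);
  uptr b = d.newNode(0);
  EXPECT_FALSE(d.onLock(&dtls, a));  // "thread 1": A...
  EXPECT_FALSE(d.onLock(&dtls, b));  // ...then B; records edge A->B
  d.onUnlock(&dtls, b);
  d.onUnlock(&dtls, a);
  EXPECT_FALSE(d.onLock(&dtls, b));       // "thread 2": holds B...
  EXPECT_TRUE(d.onLockBefore(&dtls, a));  // ...asks for A: cycle found
}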
@@ -189,14 +205,14 @@ class DeadlockDetector {
     return false;
   }
 
-  // Finds a path between the lock 'cur_node' (which is currently held in dtls)
-  // and some other currently held lock, returns the length of the path
+  // Finds a path between the lock 'cur_node' (currently not held in dtls)
+  // and some currently held lock, returns the length of the path
   // or 0 on failure.
-  uptr findPathToHeldLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node,
-                          uptr *path, uptr path_size) {
+  uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
+                      uptr path_size) {
     tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
     uptr idx = nodeToIndex(cur_node);
-    CHECK(tmp_bv_.clearBit(idx));
+    CHECK(!tmp_bv_.getBit(idx));
     uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
     for (uptr i = 0; i < res; i++)
       path[i] = indexToNode(path[i]);
compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cc

@@ -98,24 +98,15 @@ void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
 void DD::MutexBeforeLock(DDCallback *cb,
     DDMutex *m, bool wlock) {
-}
-
-void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
   DDLogicalThread *lt = cb->lt;
+  if (dd.onFirstLock(&lt->dd, m->id))
+    return;
-  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
   SpinMutexLock lk(&mtx);
   MutexEnsureID(lt, m);
   if (wlock)  // Only a recursive rlock may be held.
     CHECK(!dd.isHeld(&lt->dd, m->id));
-  // Printf("T%d MutexLock: %zx\n", thr->tid, s->deadlock_detector_id);
-  bool has_deadlock = trylock
-      ? dd.onTryLock(&lt->dd, m->id)
-      : dd.onLock(&lt->dd, m->id);
-  if (has_deadlock) {
-    if (dd.isHeld(&lt->dd, m->id))
-      return;  // FIXME: allow this only for recursive locks.
+  if (dd.onLockBefore(&lt->dd, m->id)) {
     uptr path[10];
-    uptr len = dd.findPathToHeldLock(&lt->dd, m->id,
-                                     path, ARRAY_SIZE(path));
+    uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
     CHECK_GT(len, 0U);  // Hm.. cycle of 10 locks? I'd like to see that.
     lt->report_pending = true;
     DDReport *rep = &lt->rep;
@@ -131,9 +122,24 @@ void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
   }
 }
 
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
+  DDLogicalThread *lt = cb->lt;
+  // Printf("T%p MutexLock: %zx\n", lt, m->id);
+  if (dd.onFirstLock(&lt->dd, m->id))
+    return;
+  SpinMutexLock lk(&mtx);
+  MutexEnsureID(lt, m);
+  if (wlock)  // Only a recursive rlock may be held.
+    CHECK(!dd.isHeld(&lt->dd, m->id));
+  bool edge_added =
+      trylock ? dd.onTryLock(&lt->dd, m->id) : dd.onLockAfter(&lt->dd, m->id);
+  if (edge_added) {
+    // Printf("Edge added\n");
+  }
+}
+
 void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
-  // Printf("T%d MutexUnlock: %zx; recursion %d\n", thr->tid,
-  //     s->deadlock_detector_id, s->recursion);
+  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
   dd.onUnlock(&cb->lt->dd, m->id);
 }
 
compiler-rt/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc

@@ -82,10 +82,10 @@ void RunBasicTest() {
   d.onUnlock(&dtls, n1);
 
   EXPECT_FALSE(d.onLock(&dtls, n2));
+  EXPECT_EQ(0U, d.findPathToLock(&dtls, n1, path, 1));
+  EXPECT_EQ(2U, d.findPathToLock(&dtls, n1, path, 10));
+  EXPECT_EQ(2U, d.findPathToLock(&dtls, n1, path, 2));
   EXPECT_TRUE(d.onLock(&dtls, n1));
-  EXPECT_EQ(0U, d.findPathToHeldLock(&dtls, n1, path, 1));
-  EXPECT_EQ(2U, d.findPathToHeldLock(&dtls, n1, path, 10));
-  EXPECT_EQ(2U, d.findPathToHeldLock(&dtls, n1, path, 2));
   EXPECT_EQ(path[0], n1);
   EXPECT_EQ(path[1], n2);
   EXPECT_EQ(d.getData(n1), 1U);
@@ -113,9 +113,9 @@ void RunBasicTest() {
   d.onUnlock(&dtls, n2);
 
   EXPECT_FALSE(d.onLock(&dtls, n3));
+  EXPECT_EQ(0U, d.findPathToLock(&dtls, n1, path, 2));
+  EXPECT_EQ(3U, d.findPathToLock(&dtls, n1, path, 10));
   EXPECT_TRUE(d.onLock(&dtls, n1));
-  EXPECT_EQ(0U, d.findPathToHeldLock(&dtls, n1, path, 2));
-  EXPECT_EQ(3U, d.findPathToHeldLock(&dtls, n1, path, 10));
   EXPECT_EQ(path[0], n1);
   EXPECT_EQ(path[1], n2);
   EXPECT_EQ(path[2], n3);
compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cc

@@ -141,7 +141,8 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
   thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
   if (flags()->detect_deadlocks && s->recursion == 1) {
     Callback cb(thr, pc);
-    ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+    if (!try_lock)
+      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
     ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
   }
   s->mtx.Unlock();
@@ -216,7 +217,8 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
   thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
   if (flags()->detect_deadlocks && s->recursion == 0) {
     Callback cb(thr, pc);
-    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+    if (!trylock)
+      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
     ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
   }
   s->mtx.ReadUnlock();
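The "if (!try_lock)" guard added in both hunks reflects the asymmetry of the new hooks: a try-lock never blocks, so there is nothing to report before it, and only the edge-recording half applies (MutexAfterLock dispatches to onTryLock rather than onLockAfter in that case). Condensed, the protocol the runtime now follows looks like this (sketch only; AcquireWithDetection and AcquireReal are hypothetical names, while ctx->dd, Callback, and DDMutex appear in the diff above):

// Sketch: how the split hooks bracket a mutex acquisition in tsan.
void AcquireWithDetection(Callback *cb, DDMutex *dd_m, bool wlock,
                          bool try_lock) {
  if (!try_lock)
    ctx->dd->MutexBeforeLock(cb, dd_m, wlock);  // cycle check, may report
  AcquireReal();  // blocking lock, or a try-lock that succeeded
  ctx->dd->MutexAfterLock(cb, dd_m, wlock, try_lock);  // record edges
}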