mirror of
https://gcc.gnu.org/git/gcc.git
synced 2024-11-23 19:03:59 +08:00
1018981977
libsanitizer/
	* All source files: Merge from upstream 285547.
	* configure.tgt (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): New variable.
	* configure.ac (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): Handle it.
	* asan/Makefile.am (asan_files): Add new files.
	* asan/Makefile.in: Regenerate.
	* ubsan/Makefile.in: Likewise.
	* lsan/Makefile.in: Likewise.
	* tsan/Makefile.am (tsan_files): Add new files.
	* tsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
	(EXTRA_libsanitizer_common_la_SOURCES): Define.
	(libsanitizer_common_la_LIBADD): Likewise.
	(libsanitizer_common_la_DEPENDENCIES): Likewise.
	* sanitizer_common/Makefile.in: Regenerate.
	* interception/Makefile.in: Likewise.
	* libbacktrace/Makefile.in: Likewise.
	* Makefile.in: Likewise.
	* configure: Likewise.
	* merge.sh: Handle builtins/assembly.h merging.
	* builtins/assembly.h: New file.
	* asan/libtool-version: Bump the libasan SONAME.

From-SVN: r241977
194 lines
5.5 KiB
C++
//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//

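// Rough usage sketch (inferred from the interface implemented below; the
// real driver is a sanitizer runtime such as TSan, which supplies a
// DDCallback whose Unwind()/UniqueTid() report stacks and thread ids and
// whose lt field points at the thread's DDLogicalThread; variable names in
// this sketch are illustrative only):
//
//   DDetector *d = DDetector::Create(&flags);
//   cb.lt = d->CreateLogicalThread(thread_ctx);
//   d->MutexInit(&cb, &m);                        // once per mutex
//   d->MutexBeforeLock(&cb, &m, /*wlock=*/true);  // may detect a cycle
//   d->MutexAfterLock(&cb, &m, /*wlock=*/true, /*trylock=*/false);
//   if (DDReport *rep = d->GetReport(&cb))
//     ;  // rep->loop[0..rep->n) describes the lock cycle.
//   d->MutexBeforeUnlock(&cb, &m, /*wlock=*/true);
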
#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.

struct DDPhysicalThread {
};

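// Per-thread detector state: the client's thread context, the thread-local
// view of the lock graph (which locks this thread currently holds), and a
// slot for a deadlock report pending on this thread.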
struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};

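// The detector itself: the global lock-relation bit matrix (dd) guarded by a
// spin mutex, plus the flags it was created with. One instance is created by
// DDetector::Create().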
struct DD : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb) override;

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  return nullptr;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}

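// Assigns m a node id in the current epoch of the global graph if it does
// not yet have one, and brings the thread-local state into the same epoch.
// Called with mtx held.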
void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}

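// Called before the thread actually blocks on m. If acquiring m would close
// a cycle with the locks the thread already holds, the edge is added and a
// deadlock report is prepared before the lock is taken.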
void DD::MutexBeforeLock(DDCallback *cb,
    DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}

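// Extracts the cycle that MutexBeforeLock detected from the graph and fills
// in the per-thread DDReport (one entry per edge of the cycle), to be
// handed out later via GetReport().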
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[20];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  if (len == 0U) {
    // A cycle of 20+ locks? Well, that's a bit odd...
    Printf("WARNING: too long mutex cycle found\n");
    return;
  }
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  len = Min<uptr>(len, DDReport::kMaxLoopSize);
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = -1U, stk_to = -1U;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //        unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}

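// Called once m has actually been acquired: records the lock in the
// thread-local set and, unless this was a trylock, adds edges from every
// lock the thread already held to m.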
void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}

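// Forgets a destroyed mutex: drops its node from the current epoch of the
// graph, if it has one, and clears its id.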
void DD::MutexDestroy(DDCallback *cb,
    DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}

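// Hands the deadlock report pending on the calling thread, if any, to the
// caller exactly once; subsequent calls return nullptr until a new cycle is
// found.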
DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return nullptr;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1