mirror of https://gcc.gnu.org/git/gcc.git (synced 2024-11-23 19:03:59 +08:00)
696d846a56
libsanitizer/

2015-10-20  Maxim Ostapenko  <m.ostapenko@partner.samsung.com>

	* All source files: Merge from upstream r250806.
	* configure.ac (link_sanitizer_common): Add -lrt flag.
	* configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
	Set CXX_ABI_NEEDED=true for darwin.
	* asan/Makefile.am (asan_files): Add new files.
	(DEFS): Add DCAN_SANITIZE_UB=0 and remove unused and legacy
	DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
	* asan/Makefile.in: Regenerate.
	* ubsan/Makefile.am (ubsan_files): Add new files.
	(DEFS): Add DCAN_SANITIZE_UB=1.
	(libubsan_la_LIBADD): Add -lc++abi if CXX_ABI_NEEDED is true.
	* ubsan/Makefile.in: Regenerate.
	* tsan/Makefile.am (tsan_files): Add new files.
	(DEFS): Add DCAN_SANITIZE_UB=0.
	* tsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
	* sanitizer_common/Makefile.in: Regenerate.
	* asan/libtool-version: Bump the libasan SONAME.

From-SVN: r229111
218 lines
4.7 KiB
C++
//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};

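// Note on the locking strategy above: Lock() takes the uncontended fast path
// with a single atomic_exchange, and LockSlow() handles contention by spinning
// with proc_yield(10) for the first ten iterations, then yielding the CPU via
// internal_sched_yield(). It only retries the acquiring exchange after a
// relaxed load has observed state_ == 0, i.e. a test-and-test-and-set loop
// that avoids hammering the cache line while another thread holds the lock.
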
class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};

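// Rough usage sketch (illustrative only; the guard typedef SpinMutexLock is
// defined near the end of this header):
//
//   static StaticSpinMutex init_mu;  // zero-initialized static storage is
//                                    // already in the unlocked state
//   void InitOnce() {
//     SpinMutexLock l(&init_mu);     // Lock() here, Unlock() at scope exit
//     // ... initialize shared state ...
//   }
//
// SpinMutex differs only in calling Init() from its constructor, which makes
// it suitable for ordinary (non-static) members; the private, undefined copy
// constructor and operator= are the pre-C++11 idiom for a non-copyable type.
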
class BlockingMutex {
 public:
#if SANITIZER_WINDOWS
  // Windows does not currently support LinkerInitialized
  explicit BlockingMutex(LinkerInitialized);
#else
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_(0) {}
#endif
  BlockingMutex();
  void Lock();
  void Unlock();
  void CheckLocked();
 private:
  uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};

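// Unlike the spin mutexes above, BlockingMutex only declares Lock(), Unlock()
// and CheckLocked() here; the definitions live in the platform-specific parts
// of the runtime, which park a contended thread in the kernel (a futex-style
// wait on Linux) rather than spinning. opaque_storage_ reserves room for
// whatever state that implementation needs, and the LinkerInitialized
// constructor keeps the object valid when it sits in zero-initialized static
// storage.
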
// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator = (const RWMutex&);
};

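// The u32 state_ above encodes both modes at once: bit 0 (kWriteLock) marks a
// writer, and every reader adds kReadLock == 2, so with no writer and n
// readers state_ == 2 * n. For example, three concurrent ReadLock() calls
// leave state_ == 6, and each ReadUnlock() subtracts 2 until the mutex is back
// to kUnlocked (0). A writer can only enter by swapping 0 for 1, while a
// reader that loses the race to a writer waits in ReadLockSlow() for the write
// bit to clear (its earlier fetch_add has already registered it as a reader).
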
template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;

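// Rough usage sketch for the guard typedefs (illustrative only; SymbolCache,
// Lookup and Insert below are hypothetical names, not part of the runtime):
//
//   class SymbolCache {
//    public:
//     const char *Lookup(uptr addr) {
//       RWMutexReadLock l(&mu_);  // shared: many readers may hold this at once
//       return Find(addr);
//     }
//     void Insert(uptr addr, const char *name) {
//       RWMutexLock l(&mu_);      // exclusive: waits for readers and writers
//       Store(addr, name);
//     }
//    private:
//     RWMutex mu_;
//     // Find()/Store() elided.
//   };
//
// Each guard unlocks in its destructor, so every early return path releases
// the mutex without explicit Unlock()/ReadUnlock() calls.
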
}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H