Mirror of https://gcc.gnu.org/git/gcc.git, synced 2024-11-23 10:54:07 +08:00

libsanitizer: Merge with upstream

Merged revision: 82bc6a094e85014f1891ef9407496f44af8fe442, with the fix for PR sanitizer/102911.

This commit is contained in: parent a30a2e43e4, commit 86289a4ff4
@@ -1,4 +1,4 @@
-78d3e0a4f1406b17cdecc77540e09210670fe9a9
+82bc6a094e85014f1891ef9407496f44af8fe442
 
 The first line of this file holds the git revision number of the
 last merge done from the master library sources.
@@ -102,19 +102,18 @@ class ChunkHeader {
 
  public:
   uptr UsedSize() const {
-    uptr R = user_requested_size_lo;
-    if (sizeof(uptr) > sizeof(user_requested_size_lo))
-      R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo));
-    return R;
+    static_assert(sizeof(user_requested_size_lo) == 4,
+                  "Expression below requires this");
+    return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
+           user_requested_size_lo;
   }
 
   void SetUsedSize(uptr size) {
     user_requested_size_lo = size;
-    if (sizeof(uptr) > sizeof(user_requested_size_lo)) {
-      size >>= (8 * sizeof(user_requested_size_lo));
-      user_requested_size_hi = size;
-      CHECK_EQ(user_requested_size_hi, size);
-    }
+    static_assert(sizeof(user_requested_size_lo) == 4,
+                  "Expression below requires this");
+    user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
+    CHECK_EQ(UsedSize(), size);
   }
 
   void SetAllocContext(u32 tid, u32 stack) {
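Annotation: both versions of UsedSize()/SetUsedSize() above encode the user-requested size as a u32 low half plus a u16 high half, bounding chunk sizes to 2^48. A minimal standalone sketch of that round-trip, using standard integer types in place of the sanitizer's u16/u32/uptr:

    #include <cassert>
    #include <cstdint>

    struct SplitSize {
      uint16_t hi;  // plays the role of user_requested_size_hi
      uint32_t lo;  // plays the role of user_requested_size_lo

      void Set(uint64_t size) {
        lo = static_cast<uint32_t>(size);
        hi = static_cast<uint16_t>(size >> 32);
        assert(Get() == size);  // mirrors CHECK_EQ(UsedSize(), size)
      }
      uint64_t Get() const { return (static_cast<uint64_t>(hi) << 32) + lo; }
    };

    int main() {
      SplitSize s;
      s.Set(0x123456789abcULL);  // any value below 2^48 round-trips
      assert(s.Get() == 0x123456789abcULL);
      return 0;
    }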
@@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
   }
 }
 
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+  if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger
+    // the entire redzone of the second global may be within the first global.
+    for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+      if (g->beg == l->g->beg &&
+          (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+          !IsODRViolationSuppressed(g->name))
+        ReportODRViolation(g, FindRegistrationSite(g),
+                           l->g, FindRegistrationSite(l->g));
+    }
+  }
+}
+
 // Clang provides two different ways for global variables protection:
 // it can poison the global itself or its private alias. In former
 // case we may poison same symbol multiple times, that can help us to
@@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
     // where two globals with the same name are defined in different modules.
     if (UseODRIndicator(g))
       CheckODRViolationViaIndicator(g);
+    else
+      CheckODRViolationViaPoisoning(g);
   }
   if (CanPoisonMemory())
     PoisonRedZones(*g);
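Annotation: for readers new to the poisoning-based ODR check restored above, here is a minimal model of the core idea (the data structures are hypothetical stand-ins, not the runtime's own): a violation is reported when a global is registered at an address where a global of a different size was already registered.

    #include <cstdio>
    #include <vector>

    struct Global { unsigned long beg, size; const char *name; };
    static std::vector<Global> list_of_all_globals;

    static void RegisterAndCheck(const Global &g) {
      for (const Global &l : list_of_all_globals)
        if (g.beg == l.beg && g.size != l.size)
          std::printf("ODR violation: %s (%lu vs %lu bytes)\n",
                      g.name, g.size, l.size);
      list_of_all_globals.push_back(g);
    }

    int main() {
      RegisterAndCheck({0x1000, 16, "buffer"});
      RegisterAndCheck({0x1000, 32, "buffer"});  // same address, new size
      return 0;
    }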
@@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
 #if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
     !SANITIZER_NETBSD
 # define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
-     || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-#   define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
 # if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
 #  define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
 # else
@@ -21,129 +21,66 @@
 #  include "asan_interceptors.h"
 #  include "asan_internal.h"
 #  include "asan_stack.h"
+#  include "lsan/lsan_common.h"
 #  include "sanitizer_common/sanitizer_allocator_checks.h"
+#  include "sanitizer_common/sanitizer_allocator_dlsym.h"
 #  include "sanitizer_common/sanitizer_errno.h"
 #  include "sanitizer_common/sanitizer_tls_get_addr.h"
 
 // ---------------------- Replacement functions ---------------- {{{1
 using namespace __asan;
 
-static uptr allocated_for_dlsym;
-static uptr last_dlsym_alloc_size_in_words;
-static const uptr kDlsymAllocPoolSize = 1024;
-static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-
-static inline bool IsInDlsymAllocPool(const void *ptr) {
-  uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
-  return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
-}
-
-static void *AllocateFromLocalPool(uptr size_in_bytes) {
-  uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
-  void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym];
-  last_dlsym_alloc_size_in_words = size_in_words;
-  allocated_for_dlsym += size_in_words;
-  CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
-  return mem;
-}
-
-static void DeallocateFromLocalPool(const void *ptr) {
-  // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store
-  // error messages and instead uses malloc followed by free. To avoid pool
-  // exhaustion due to long object filenames, handle that special case here.
-  uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words;
-  void *prev_mem = (void*)&alloc_memory_for_dlsym[prev_offset];
-  if (prev_mem == ptr) {
-    REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize);
-    allocated_for_dlsym = prev_offset;
-    last_dlsym_alloc_size_in_words = 0;
-  }
-}
-
-static int PosixMemalignFromLocalPool(void **memptr, uptr alignment,
-                                      uptr size_in_bytes) {
-  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment)))
-    return errno_EINVAL;
-
-  CHECK(alignment >= kWordSize);
-
-  uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym];
-  uptr aligned_addr = RoundUpTo(addr, alignment);
-  uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize);
-
-  uptr *end_mem = (uptr*)(aligned_addr + aligned_size);
-  uptr allocated = end_mem - alloc_memory_for_dlsym;
-  if (allocated >= kDlsymAllocPoolSize)
-    return errno_ENOMEM;
-
-  allocated_for_dlsym = allocated;
-  *memptr = (void*)aligned_addr;
-  return 0;
-}
-
-static inline bool MaybeInDlsym() {
-  // Fuchsia doesn't use dlsym-based interceptors.
-  return !SANITIZER_FUCHSIA && asan_init_is_running;
-}
-
-static inline bool UseLocalPool() { return MaybeInDlsym(); }
-
-static void *ReallocFromLocalPool(void *ptr, uptr size) {
-  const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
-  const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
-  void *new_ptr;
-  if (UNLIKELY(UseLocalPool())) {
-    new_ptr = AllocateFromLocalPool(size);
-  } else {
-    ENSURE_ASAN_INITED();
-    GET_STACK_TRACE_MALLOC;
-    new_ptr = asan_malloc(size, &stack);
-  }
-  internal_memcpy(new_ptr, ptr, copy_size);
-  return new_ptr;
-}
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+  static bool UseImpl() { return asan_init_is_running; }
+  static void OnAllocate(const void *ptr, uptr size) {
+#  if CAN_SANITIZE_LEAKS
+    // Suppress leaks from dlerror(). Previously dlsym hack on global array was
+    // used by leak sanitizer as a root region.
+    __lsan_register_root_region(ptr, size);
+#  endif
+  }
+  static void OnFree(const void *ptr, uptr size) {
+#  if CAN_SANITIZE_LEAKS
+    __lsan_unregister_root_region(ptr, size);
+#  endif
+  }
+};
 
 INTERCEPTOR(void, free, void *ptr) {
-  if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
-    DeallocateFromLocalPool(ptr);
-    return;
-  }
+  if (DlsymAlloc::PointerIsMine(ptr))
+    return DlsymAlloc::Free(ptr);
   GET_STACK_TRACE_FREE;
   asan_free(ptr, &stack, FROM_MALLOC);
 }
 
 #if SANITIZER_INTERCEPT_CFREE
 INTERCEPTOR(void, cfree, void *ptr) {
-  if (UNLIKELY(IsInDlsymAllocPool(ptr)))
-    return;
+  if (DlsymAlloc::PointerIsMine(ptr))
+    return DlsymAlloc::Free(ptr);
   GET_STACK_TRACE_FREE;
   asan_free(ptr, &stack, FROM_MALLOC);
 }
 #endif  // SANITIZER_INTERCEPT_CFREE
 
 INTERCEPTOR(void*, malloc, uptr size) {
-  if (UNLIKELY(UseLocalPool()))
-    // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
-    return AllocateFromLocalPool(size);
+  if (DlsymAlloc::Use())
+    return DlsymAlloc::Allocate(size);
   ENSURE_ASAN_INITED();
   GET_STACK_TRACE_MALLOC;
   return asan_malloc(size, &stack);
 }
 
 INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
-  if (UNLIKELY(UseLocalPool()))
-    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
-    return AllocateFromLocalPool(nmemb * size);
+  if (DlsymAlloc::Use())
+    return DlsymAlloc::Callocate(nmemb, size);
   ENSURE_ASAN_INITED();
   GET_STACK_TRACE_MALLOC;
   return asan_calloc(nmemb, size, &stack);
 }
 
 INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
-  if (UNLIKELY(IsInDlsymAllocPool(ptr)))
-    return ReallocFromLocalPool(ptr, size);
-  if (UNLIKELY(UseLocalPool()))
-    return AllocateFromLocalPool(size);
+  if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+    return DlsymAlloc::Realloc(ptr, size);
   ENSURE_ASAN_INITED();
   GET_STACK_TRACE_MALLOC;
   return asan_realloc(ptr, size, &stack);
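Annotation: the rewrite above exists because the very first intercepted malloc/calloc can run while dlsym() is still resolving REAL(malloc), before the ASan allocator is initialized. A standalone model of the bootstrap dispatch (all names here are illustrative, not the runtime's):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    static bool init_is_running = true;  // stands in for asan_init_is_running
    static unsigned char pool[1 << 10];  // static fallback pool
    static size_t pool_used;

    static void *BootstrapAlloc(size_t n) {
      void *p = pool + pool_used;
      pool_used += (n + 7) & ~static_cast<size_t>(7);
      return pool_used <= sizeof(pool) ? p : nullptr;
    }

    static bool FromPool(void *p) {
      auto u = reinterpret_cast<uintptr_t>(p);
      auto b = reinterpret_cast<uintptr_t>(pool);
      return u >= b && u < b + sizeof(pool);
    }

    void *my_malloc(size_t n) {
      if (init_is_running)
        return BootstrapAlloc(n);  // DlsymAlloc::Allocate in the real code
      return std::malloc(n);
    }

    void my_free(void *p) {
      if (FromPool(p))
        return;  // DlsymAlloc::Free in the real code
      std::free(p);
    }

    int main() {
      void *early = my_malloc(32);  // served from the pool
      init_is_running = false;
      void *late = my_malloc(32);   // served by the real allocator
      my_free(early);
      my_free(late);
      return 0;
    }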
@@ -205,8 +142,6 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
 #endif  // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
 
 INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
-  if (UNLIKELY(UseLocalPool()))
-    return PosixMemalignFromLocalPool(memptr, alignment, size);
   GET_STACK_TRACE_MALLOC;
   return asan_posix_memalign(memptr, alignment, size, &stack);
 }
@@ -165,7 +165,7 @@ static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
 static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
 static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
 static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
 static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
 static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43;  // 0x80000000000
 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30;  // 0x40000000
@@ -345,7 +345,7 @@ __attribute__((constructor(0))) void __hwasan_init() {
 
   // Needs to be called here because flags()->random_tags might not have been
   // initialized when InitInstrumentation() was called.
-  GetCurrentThread()->InitRandomState();
+  GetCurrentThread()->EnsureRandomStateInited();
 
   SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
   // This may call libc -> needs initialized shadow.
@@ -14,6 +14,7 @@
 
 #include "hwasan.h"
 #include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
@@ -21,22 +22,9 @@
 
 using namespace __hwasan;
 
-static uptr allocated_for_dlsym;
-static const uptr kDlsymAllocPoolSize = 1024;
-static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-
-static bool IsInDlsymAllocPool(const void *ptr) {
-  uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
-  return off < sizeof(alloc_memory_for_dlsym);
-}
-
-static void *AllocateFromLocalPool(uptr size_in_bytes) {
-  uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
-  void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
-  allocated_for_dlsym += size_in_words;
-  CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
-  return mem;
-}
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+  static bool UseImpl() { return !hwasan_inited; }
+};
 
 extern "C" {
@@ -83,17 +71,21 @@ void *__sanitizer_pvalloc(uptr size) {
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __sanitizer_free(void *ptr) {
-  GET_MALLOC_STACK_TRACE;
-  if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+  if (!ptr)
     return;
+  if (DlsymAlloc::PointerIsMine(ptr))
+    return DlsymAlloc::Free(ptr);
+  GET_MALLOC_STACK_TRACE;
   hwasan_free(ptr, &stack);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __sanitizer_cfree(void *ptr) {
-  GET_MALLOC_STACK_TRACE;
-  if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+  if (!ptr)
    return;
+  if (DlsymAlloc::PointerIsMine(ptr))
+    return DlsymAlloc::Free(ptr);
+  GET_MALLOC_STACK_TRACE;
   hwasan_free(ptr, &stack);
 }
@@ -119,29 +111,17 @@ void __sanitizer_malloc_stats(void) {
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_calloc(uptr nmemb, uptr size) {
+  if (DlsymAlloc::Use())
+    return DlsymAlloc::Callocate(nmemb, size);
   GET_MALLOC_STACK_TRACE;
-  if (UNLIKELY(!hwasan_inited))
-    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
-    return AllocateFromLocalPool(nmemb * size);
   return hwasan_calloc(nmemb, size, &stack);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_realloc(void *ptr, uptr size) {
+  if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+    return DlsymAlloc::Realloc(ptr, size);
   GET_MALLOC_STACK_TRACE;
-  if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
-    uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
-    uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
-    void *new_ptr;
-    if (UNLIKELY(!hwasan_inited)) {
-      new_ptr = AllocateFromLocalPool(copy_size);
-    } else {
-      copy_size = size;
-      new_ptr = hwasan_malloc(copy_size, &stack);
-    }
-    internal_memcpy(new_ptr, ptr, copy_size);
-    return new_ptr;
-  }
   return hwasan_realloc(ptr, size, &stack);
 }
@@ -153,12 +133,11 @@ void *__sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_malloc(uptr size) {
-  GET_MALLOC_STACK_TRACE;
   if (UNLIKELY(!hwasan_init_is_running))
     ENSURE_HWASAN_INITED();
-  if (UNLIKELY(!hwasan_inited))
-    // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
-    return AllocateFromLocalPool(size);
+  if (DlsymAlloc::Use())
+    return DlsymAlloc::Allocate(size);
+  GET_MALLOC_STACK_TRACE;
   return hwasan_malloc(size, &stack);
 }
@@ -29,8 +29,8 @@ typedef _Unwind_Reason_Code PersonalityFn(int version, _Unwind_Action actions,
 // is statically linked and the sanitizer runtime and the program are linked
 // against different unwinders. The _Unwind_Context data structure is opaque so
 // it may be incompatible between unwinders.
-typedef _Unwind_Word GetGRFn(_Unwind_Context* context, int index);
-typedef _Unwind_Word GetCFAFn(_Unwind_Context* context);
+typedef uintptr_t GetGRFn(_Unwind_Context* context, int index);
+typedef uintptr_t GetCFAFn(_Unwind_Context* context);
 
 extern "C" SANITIZER_INTERFACE_ATTRIBUTE _Unwind_Reason_Code
 __hwasan_personality_wrapper(int version, _Unwind_Action actions,
@@ -130,7 +130,7 @@ static void ThreadCreateHook(void *hook, bool aborted) {
 static void ThreadStartHook(void *hook, thrd_t self) {
   Thread *thread = static_cast<Thread *>(hook);
   FinishThreadInitialization(thread);
-  thread->InitRandomState();
+  thread->EnsureRandomStateInited();
 }
 
 // This is the function that sets up the stack ring buffer and enables us to use
@@ -250,7 +250,7 @@ void InstallAtExitHandler() { atexit(HwasanAtExit); }
 // ---------------------- TSD ---------------- {{{1
 
 extern "C" void __hwasan_thread_enter() {
-  hwasanThreadList().CreateCurrentThread()->InitRandomState();
+  hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
 }
 
 extern "C" void __hwasan_thread_exit() {
@@ -1,15 +1,15 @@
 
+#include "hwasan_thread.h"
+
 #include "hwasan.h"
-#include "hwasan_mapping.h"
-#include "hwasan_thread.h"
-#include "hwasan_poisoning.h"
 #include "hwasan_interface_internal.h"
-
+#include "hwasan_mapping.h"
+#include "hwasan_poisoning.h"
+#include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_file.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
 
 namespace __hwasan {
 
 static u32 RandomSeed() {
@@ -27,6 +27,7 @@ static u32 RandomSeed() {
 
 void Thread::InitRandomState() {
   random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
+  random_state_inited_ = true;
 
   // Push a random number of zeros onto the ring buffer so that the first stack
   // tag base will be random.
@@ -40,8 +41,9 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
   CHECK_EQ(0, stack_top_);
   CHECK_EQ(0, stack_bottom_);
 
-  static u64 unique_id;
-  unique_id_ = unique_id++;
+  static atomic_uint64_t unique_id;
+  unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);
+
   if (auto sz = flags()->heap_history_size)
     heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
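Annotation: the change above replaces a racy `unique_id++` on a plain u64 with an atomic fetch-add, since two threads can be created concurrently. Standalone equivalent using the standard atomics API:

    #include <atomic>
    #include <cstdint>

    static std::atomic<uint64_t> unique_id{0};

    uint64_t NextThreadId() {
      // Relaxed ordering suffices: only uniqueness of the value matters,
      // no synchronization with other memory operations is implied.
      return unique_id.fetch_add(1, std::memory_order_relaxed);
    }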
@@ -123,17 +125,21 @@ static u32 xorshift(u32 state) {
 // Generate a (pseudo-)random non-zero tag.
 tag_t Thread::GenerateRandomTag(uptr num_bits) {
   DCHECK_GT(num_bits, 0);
-  if (tagging_disabled_) return 0;
+  if (tagging_disabled_)
+    return 0;
   tag_t tag;
   const uptr tag_mask = (1ULL << num_bits) - 1;
   do {
     if (flags()->random_tags) {
-      if (!random_buffer_)
+      if (!random_buffer_) {
+        EnsureRandomStateInited();
         random_buffer_ = random_state_ = xorshift(random_state_);
+      }
       CHECK(random_buffer_);
       tag = random_buffer_ & tag_mask;
       random_buffer_ >>= num_bits;
     } else {
+      EnsureRandomStateInited();
       random_state_ += 1;
       tag = random_state_ & tag_mask;
     }
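Annotation: a standalone sketch of the tag draw that GenerateRandomTag() performs after this change, with the random state now initialized lazily via EnsureRandomStateInited(). The xorshift32 shift constants below are the textbook ones and are an assumption here, since the function body is not part of this hunk:

    #include <cassert>
    #include <cstdint>

    static uint32_t xorshift(uint32_t state) {
      // Classic xorshift32 constants (assumed, see note above).
      state ^= state << 13;
      state ^= state >> 17;
      state ^= state << 5;
      return state;
    }

    int main() {
      uint32_t random_state = 42;   // seeded lazily in the real runtime
      uint32_t random_buffer = 0;
      const unsigned num_bits = 8;
      const uint32_t tag_mask = (1u << num_bits) - 1;
      uint8_t tag;
      do {
        if (!random_buffer)  // refill, then consume num_bits per draw
          random_buffer = random_state = xorshift(random_state);
        tag = random_buffer & tag_mask;
        random_buffer >>= num_bits;
      } while (!tag);  // a non-zero tag is required, so retry on zero
      assert(tag != 0);
      return 0;
    }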
@@ -28,12 +28,17 @@ class Thread {
 
   void Init(uptr stack_buffer_start, uptr stack_buffer_size,
             const InitState *state = nullptr);
-  void InitRandomState();
 
   void InitStackAndTls(const InitState *state = nullptr);
 
   // Must be called from the thread itself.
   void InitStackRingBuffer(uptr stack_buffer_start, uptr stack_buffer_size);
 
+  inline void EnsureRandomStateInited() {
+    if (UNLIKELY(!random_state_inited_))
+      InitRandomState();
+  }
+
   void Destroy();
 
   uptr stack_top() { return stack_top_; }
@@ -70,6 +75,7 @@ class Thread {
   // via mmap() and *must* be valid in zero-initialized state.
   void ClearShadowForThreadStackAndTLS();
   void Print(const char *prefix);
+  void InitRandomState();
   uptr vfork_spill_;
   uptr stack_top_;
   uptr stack_bottom_;
@@ -89,6 +95,8 @@ class Thread {
 
   bool announced_;
 
+  bool random_state_inited_;  // Whether InitRandomState() has been called.
+
   friend struct ThreadListHead;
 };
@@ -131,18 +131,13 @@ static LeakSuppressionContext *GetSuppressionContext() {
   return suppression_ctx;
 }
 
-static InternalMmapVector<RootRegion> *root_regions;
+static InternalMmapVectorNoCtor<RootRegion> root_regions;
 
-InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
-
-void InitializeRootRegions() {
-  CHECK(!root_regions);
-  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
-  root_regions = new (placeholder) InternalMmapVector<RootRegion>();
+InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
+  return &root_regions;
 }
 
 void InitCommonLsan() {
-  InitializeRootRegions();
   if (common_flags()->detect_leaks) {
     // Initialization which can fail or print warnings should only be done if
     // LSan is actually enabled.
@@ -426,10 +421,8 @@ static void ProcessRootRegion(Frontier *frontier,
 // Scans root regions for heap pointers.
 static void ProcessRootRegions(Frontier *frontier) {
   if (!flags()->use_root_regions) return;
-  CHECK(root_regions);
-  for (uptr i = 0; i < root_regions->size(); i++) {
-    ProcessRootRegion(frontier, (*root_regions)[i]);
-  }
+  for (uptr i = 0; i < root_regions.size(); i++)
+    ProcessRootRegion(frontier, root_regions[i]);
 }
 
 static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
@@ -966,9 +959,8 @@ SANITIZER_INTERFACE_ATTRIBUTE
 void __lsan_register_root_region(const void *begin, uptr size) {
 #if CAN_SANITIZE_LEAKS
   Lock l(&global_mutex);
-  CHECK(root_regions);
   RootRegion region = {reinterpret_cast<uptr>(begin), size};
-  root_regions->push_back(region);
+  root_regions.push_back(region);
   VReport(1, "Registered root region at %p of size %zu\n", begin, size);
 #endif  // CAN_SANITIZE_LEAKS
 }
@@ -977,15 +969,14 @@ SANITIZER_INTERFACE_ATTRIBUTE
 void __lsan_unregister_root_region(const void *begin, uptr size) {
 #if CAN_SANITIZE_LEAKS
   Lock l(&global_mutex);
-  CHECK(root_regions);
   bool removed = false;
-  for (uptr i = 0; i < root_regions->size(); i++) {
-    RootRegion region = (*root_regions)[i];
+  for (uptr i = 0; i < root_regions.size(); i++) {
+    RootRegion region = root_regions[i];
     if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
       removed = true;
-      uptr last_index = root_regions->size() - 1;
-      (*root_regions)[i] = (*root_regions)[last_index];
-      root_regions->pop_back();
+      uptr last_index = root_regions.size() - 1;
+      root_regions[i] = root_regions[last_index];
+      root_regions.pop_back();
       VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
       break;
     }
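Annotation: the unregister loop above removes an element by overwriting it with the last element and popping, which is O(1) but does not preserve order. Standalone equivalent of that swap-remove idiom:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct RootRegion { uintptr_t begin; size_t size; };

    bool Unregister(std::vector<RootRegion> &regions,
                    uintptr_t begin, size_t size) {
      for (size_t i = 0; i < regions.size(); i++) {
        if (regions[i].begin == begin && regions[i].size == size) {
          regions[i] = regions.back();  // element order is not preserved
          regions.pop_back();
          return true;
        }
      }
      return false;
    }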
@@ -140,7 +140,7 @@ struct CheckForLeaksParam {
   bool success = false;
 };
 
-InternalMmapVector<RootRegion> const *GetRootRegions();
+InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions();
 void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                     uptr region_begin, uptr region_end, bool is_readable);
 void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
@@ -280,6 +280,13 @@ int __lsan_is_turned_off();
 
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
 const char *__lsan_default_suppressions();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *p, __lsan::uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *p, __lsan::uptr size);
+
 }  // extern "C"
 
 #endif  // LSAN_COMMON_H
@@ -149,7 +149,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
   kern_return_t err = KERN_SUCCESS;
   mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
 
-  InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();
+  InternalMmapVectorNoCtor<RootRegion> const *root_regions = GetRootRegions();
 
   while (err == KERN_SUCCESS) {
     struct vm_region_submap_info_64 info;
@@ -13,6 +13,7 @@
 
 #include "interception/interception.h"
 #include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
 #include "sanitizer_common/sanitizer_allocator_report.h"
 #include "sanitizer_common/sanitizer_atomic.h"
 #include "sanitizer_common/sanitizer_common.h"
@@ -43,6 +44,22 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
 int pthread_setspecific(unsigned key, const void *v);
 }
 
+struct DlsymAlloc : DlSymAllocator<DlsymAlloc> {
+  static bool UseImpl() { return lsan_init_is_running; }
+  static void OnAllocate(const void *ptr, uptr size) {
+#if CAN_SANITIZE_LEAKS
+    // Suppress leaks from dlerror(). Previously dlsym hack on global array was
+    // used by leak sanitizer as a root region.
+    __lsan_register_root_region(ptr, size);
+#endif
+  }
+  static void OnFree(const void *ptr, uptr size) {
+#if CAN_SANITIZE_LEAKS
+    __lsan_unregister_root_region(ptr, size);
+#endif
+  }
+};
+
 ///// Malloc/free interceptors. /////
 
 namespace std {
@@ -52,41 +69,34 @@ namespace std {
 
 #if !SANITIZER_MAC
 INTERCEPTOR(void*, malloc, uptr size) {
+  if (DlsymAlloc::Use())
+    return DlsymAlloc::Allocate(size);
   ENSURE_LSAN_INITED;
   GET_STACK_TRACE_MALLOC;
   return lsan_malloc(size, stack);
 }
 
 INTERCEPTOR(void, free, void *p) {
+  if (DlsymAlloc::PointerIsMine(p))
+    return DlsymAlloc::Free(p);
   ENSURE_LSAN_INITED;
   lsan_free(p);
 }
 
 INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
-  // This hack is not required for Fuchsia because there are no dlsym calls
-  // involved in setting up interceptors.
-#if !SANITIZER_FUCHSIA
-  if (lsan_init_is_running) {
-    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
-    const uptr kCallocPoolSize = 1024;
-    static uptr calloc_memory_for_dlsym[kCallocPoolSize];
-    static uptr allocated;
-    uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
-    void *mem = (void*)&calloc_memory_for_dlsym[allocated];
-    allocated += size_in_words;
-    CHECK(allocated < kCallocPoolSize);
-    return mem;
-  }
-#endif  // !SANITIZER_FUCHSIA
+  if (DlsymAlloc::Use())
+    return DlsymAlloc::Callocate(nmemb, size);
   ENSURE_LSAN_INITED;
   GET_STACK_TRACE_MALLOC;
   return lsan_calloc(nmemb, size, stack);
 }
 
-INTERCEPTOR(void*, realloc, void *q, uptr size) {
+INTERCEPTOR(void *, realloc, void *ptr, uptr size) {
+  if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+    return DlsymAlloc::Realloc(ptr, size);
   ENSURE_LSAN_INITED;
   GET_STACK_TRACE_MALLOC;
-  return lsan_realloc(q, size, stack);
+  return lsan_realloc(ptr, size, stack);
 }
 
 INTERCEPTOR(void*, reallocarray, void *q, uptr nmemb, uptr size) {
@@ -39,6 +39,11 @@ namespace __sanitizer {
 //      the current thread has exclusive access to the data
 //      if !h.exists() then the element never existed
 //  }
+//  {
+//    Map::Handle h(&m, addr, false, true);
+//    this will create a new element or return a handle to an existing element
+//    if !h.created() this thread does *not* have exclusive access to the data
+//  }
 template<typename T, uptr kSize>
 class AddrHashMap {
  private:
@@ -89,6 +94,12 @@ class AddrHashMap {
     bool create_;
   };
 
+  typedef void (*ForEachCallback)(const uptr key, const T &val, void *arg);
+  // ForEach acquires a lock on each bucket while iterating over
+  // elements. Note that this only ensures that the structure of the hashmap is
+  // unchanged, there may be a data race to the element itself.
+  void ForEach(ForEachCallback cb, void *arg);
+
  private:
   friend class Handle;
   Bucket *table_;
@@ -98,6 +109,33 @@ class AddrHashMap {
   uptr calcHash(uptr addr);
 };
 
+template <typename T, uptr kSize>
+void AddrHashMap<T, kSize>::ForEach(ForEachCallback cb, void *arg) {
+  for (uptr n = 0; n < kSize; n++) {
+    Bucket *bucket = &table_[n];
+
+    ReadLock lock(&bucket->mtx);
+
+    for (uptr i = 0; i < kBucketSize; i++) {
+      Cell *c = &bucket->cells[i];
+      uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+      if (addr1 != 0)
+        cb(addr1, c->val, arg);
+    }
+
+    // Iterate over any additional cells.
+    if (AddBucket *add =
+            (AddBucket *)atomic_load(&bucket->add, memory_order_acquire)) {
+      for (uptr i = 0; i < add->size; i++) {
+        Cell *c = &add->cells[i];
+        uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+        if (addr1 != 0)
+          cb(addr1, c->val, arg);
+      }
+    }
+  }
+}
+
 template<typename T, uptr kSize>
 AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
   map_ = map;
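Annotation: the new ForEach() iterates under a per-bucket read lock, so the table's structure cannot change during the walk while the values themselves may still race (as the header comment warns). A simplified standalone model of that contract:

    #include <cstdint>
    #include <cstdio>
    #include <shared_mutex>
    #include <vector>

    struct Cell { uintptr_t addr; int val; };
    struct Bucket { std::shared_mutex mtx; std::vector<Cell> cells; };

    using ForEachCallback = void (*)(uintptr_t key, const int &val, void *arg);

    void ForEach(std::vector<Bucket> &table, ForEachCallback cb, void *arg) {
      for (Bucket &bucket : table) {
        // Shared (read) lock: the analogue of the ReadLock above.
        std::shared_lock<std::shared_mutex> lock(bucket.mtx);
        for (const Cell &c : bucket.cells)
          if (c.addr != 0)  // empty cells are skipped, as in the real code
            cb(c.addr, c.val, arg);
      }
    }

    int main() {
      std::vector<Bucket> table(4);
      table[1].cells.push_back({0x1000, 7});
      ForEach(table, [](uintptr_t a, const int &v, void *) {
        std::printf("%#lx -> %d\n", static_cast<unsigned long>(a), v);
      }, nullptr);
      return 0;
    }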
@@ -112,15 +112,13 @@ class CombinedAllocator {
     return new_p;
   }
 
-  bool PointerIsMine(void *p) {
+  bool PointerIsMine(const void *p) const {
     if (primary_.PointerIsMine(p))
       return true;
     return secondary_.PointerIsMine(p);
   }
 
-  bool FromPrimary(void *p) {
-    return primary_.PointerIsMine(p);
-  }
+  bool FromPrimary(const void *p) const { return primary_.PointerIsMine(p); }
 
   void *GetMetaData(const void *p) {
     if (primary_.PointerIsMine(p))
libsanitizer/sanitizer_common/sanitizer_allocator_dlsym.h (new file, 79 lines)
@@ -0,0 +1,79 @@
+//===-- sanitizer_allocator_dlsym.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Hack: Sanitizer initializer calls dlsym which may need to allocate and call
+// back into uninitialized sanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_DLSYM_H
+#define SANITIZER_ALLOCATOR_DLSYM_H
+
+#include "sanitizer_allocator_internal.h"
+
+namespace __sanitizer {
+
+template <typename Details>
+struct DlSymAllocator {
+  static bool Use() {
+    // Fuchsia doesn't use dlsym-based interceptors.
+    return !SANITIZER_FUCHSIA && UNLIKELY(Details::UseImpl());
+  }
+
+  static bool PointerIsMine(const void *ptr) {
+    // Fuchsia doesn't use dlsym-based interceptors.
+    return !SANITIZER_FUCHSIA &&
+           UNLIKELY(internal_allocator()->FromPrimary(ptr));
+  }
+
+  static void *Allocate(uptr size_in_bytes) {
+    void *ptr = InternalAlloc(size_in_bytes, nullptr, kWordSize);
+    CHECK(internal_allocator()->FromPrimary(ptr));
+    Details::OnAllocate(ptr,
+                        internal_allocator()->GetActuallyAllocatedSize(ptr));
+    return ptr;
+  }
+
+  static void *Callocate(SIZE_T nmemb, SIZE_T size) {
+    void *ptr = InternalCalloc(nmemb, size);
+    CHECK(internal_allocator()->FromPrimary(ptr));
+    Details::OnAllocate(ptr,
+                        internal_allocator()->GetActuallyAllocatedSize(ptr));
+    return ptr;
+  }
+
+  static void Free(void *ptr) {
+    uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
+    Details::OnFree(ptr, size);
+    InternalFree(ptr);
+  }
+
+  static void *Realloc(void *ptr, uptr new_size) {
+    if (!ptr)
+      return Allocate(new_size);
+    CHECK(internal_allocator()->FromPrimary(ptr));
+    if (!new_size) {
+      Free(ptr);
+      return nullptr;
+    }
+    uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
+    uptr memcpy_size = Min(new_size, size);
+    void *new_ptr = Allocate(new_size);
+    if (new_ptr)
+      internal_memcpy(new_ptr, ptr, memcpy_size);
+    Free(ptr);
+    return new_ptr;
+  }
+
+  static void OnAllocate(const void *ptr, uptr size) {}
+  static void OnFree(const void *ptr, uptr size) {}
+};
+
+}  // namespace __sanitizer
+
+#endif  // SANITIZER_ALLOCATOR_DLSYM_H
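Annotation: a usage sketch for the new CRTP helper, mirroring the DlsymAlloc definitions this merge adds in the asan, hwasan, lsan, and Mac malloc files. The flag name below is a hypothetical stand-in; each tool supplies its own UseImpl() condition (and optional OnAllocate/OnFree hooks) and inherits the rest. This compiles only inside the sanitizer tree:

    #include "sanitizer_common/sanitizer_allocator_dlsym.h"

    extern bool tool_init_is_running;  // stand-in for e.g. asan_init_is_running

    struct DlsymAlloc : public __sanitizer::DlSymAllocator<DlsymAlloc> {
      // Route requests to the internal allocator only during early init.
      static bool UseImpl() { return tool_init_is_running; }
    };

    // Typical interceptor dispatch (see the asan/lsan hunks in this commit):
    //   if (DlsymAlloc::Use())
    //     return DlsymAlloc::Allocate(size);
    //   if (DlsymAlloc::PointerIsMine(ptr))
    //     return DlsymAlloc::Free(ptr);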
@@ -189,7 +189,7 @@ class SizeClassAllocator32 {
     sci->free_list.push_front(b);
   }
 
-  bool PointerIsMine(const void *p) {
+  bool PointerIsMine(const void *p) const {
     uptr mem = reinterpret_cast<uptr>(p);
     if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
       mem &= (kSpaceSize - 1);
@@ -293,9 +293,7 @@ class SizeClassAllocator32 {
     return res;
   }
 
-  uptr ComputeRegionBeg(uptr mem) {
-    return mem & ~(kRegionSize - 1);
-  }
+  uptr ComputeRegionBeg(uptr mem) const { return mem & ~(kRegionSize - 1); }
 
   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     DCHECK_LT(class_id, kNumClasses);
@@ -161,7 +161,7 @@ class LargeMmapAllocator {
     return res;
   }
 
-  bool PointerIsMine(const void *p) {
+  bool PointerIsMine(const void *p) const {
     return GetBlockBegin(p) != nullptr;
   }
@@ -179,7 +179,7 @@ class LargeMmapAllocator {
     return GetHeader(p) + 1;
   }
 
-  void *GetBlockBegin(const void *ptr) {
+  void *GetBlockBegin(const void *ptr) const {
     uptr p = reinterpret_cast<uptr>(ptr);
     SpinMutexLock l(&mutex_);
     uptr nearest_chunk = 0;
@@ -301,7 +301,7 @@ class LargeMmapAllocator {
     return GetHeader(reinterpret_cast<uptr>(p));
   }
 
-  void *GetUser(const Header *h) {
+  void *GetUser(const Header *h) const {
     CHECK(IsAligned((uptr)h, page_size_));
     return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
   }
@@ -318,5 +318,5 @@ class LargeMmapAllocator {
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
   } stats;
-  StaticSpinMutex mutex_;
+  mutable StaticSpinMutex mutex_;
 };
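Annotation: the `mutable` qualifier above is what lets the accessors in this file (PointerIsMine, GetBlockBegin, GetUser) become const while still taking the spin lock. A minimal standalone illustration of the idiom:

    #include <mutex>

    class Registry {
     public:
      bool Contains(int x) const {
        std::lock_guard<std::mutex> l(mutex_);  // OK: mutex_ is mutable
        return x == last_;
      }

     private:
      mutable std::mutex mutex_;  // lockable even from const members
      int last_ = 0;
    };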
@@ -293,7 +293,7 @@ class DeadlockDetector {
   }
 
   // Returns true iff dtls is empty (no locks are currently held) and we can
-  // add the node to the currently held locks w/o chanding the global state.
+  // add the node to the currently held locks w/o changing the global state.
   // This operation is thread-safe as it only touches the dtls.
   bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
     if (!dtls->empty()) return false;
@@ -150,14 +150,34 @@ const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
 
 namespace __sanitizer {
 
-#if SANITIZER_LINUX && defined(__x86_64__)
-#include "sanitizer_syscall_linux_x86_64.inc"
-#elif SANITIZER_LINUX && SANITIZER_RISCV64
-#include "sanitizer_syscall_linux_riscv64.inc"
-#elif SANITIZER_LINUX && defined(__aarch64__)
-#include "sanitizer_syscall_linux_aarch64.inc"
-#elif SANITIZER_LINUX && defined(__arm__)
-#include "sanitizer_syscall_linux_arm.inc"
+void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *old) {
+  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, set, old));
+}
+
+ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
+  __sanitizer_sigset_t set;
+  internal_sigfillset(&set);
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+  // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked
+  // on any thread, setuid call hangs.
+  // See test/sanitizer_common/TestCases/Linux/setuid.c.
+  internal_sigdelset(&set, 33);
+# endif
+  SetSigProcMask(&set, &saved_);
+  if (copy)
+    internal_memcpy(copy, &saved_, sizeof(saved_));
+}
+
+ScopedBlockSignals::~ScopedBlockSignals() { SetSigProcMask(&saved_, nullptr); }
+
+# if SANITIZER_LINUX && defined(__x86_64__)
+#  include "sanitizer_syscall_linux_x86_64.inc"
+# elif SANITIZER_LINUX && SANITIZER_RISCV64
+#  include "sanitizer_syscall_linux_riscv64.inc"
+# elif SANITIZER_LINUX && defined(__aarch64__)
+#  include "sanitizer_syscall_linux_aarch64.inc"
+# elif SANITIZER_LINUX && defined(__arm__)
+#  include "sanitizer_syscall_linux_arm.inc"
+# elif SANITIZER_LINUX && defined(__hexagon__)
+#  include "sanitizer_syscall_linux_hexagon.inc"
 #  else
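Annotation: a brief usage sketch of the new RAII helper defined above; the surrounding function is hypothetical. Signals are blocked (except SIGSETXID, which glibc needs for setuid) for the lifetime of the object, and the saved mask is restored in the destructor:

    // Inside the sanitizer runtime (see internal_start_thread below for the
    // real call site):
    void SpawnInternalThread() {
      ScopedBlockSignals block(nullptr);  // nullptr: caller keeps no copy
      // ... create the thread here; it inherits the fully blocked mask ...
    }  // ~ScopedBlockSignals() restores the previous signal mask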
@@ -1741,17 +1761,9 @@ HandleSignalMode GetHandleSignalMode(int signum) {
 #if !SANITIZER_GO
 void *internal_start_thread(void *(*func)(void *arg), void *arg) {
   // Start the thread with signals blocked, otherwise it can steal user signals.
-  __sanitizer_sigset_t set, old;
-  internal_sigfillset(&set);
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-  // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked
-  // on any thread, setuid call hangs (see test/tsan/setuid.c).
-  internal_sigdelset(&set, 33);
-#endif
-  internal_sigprocmask(SIG_SETMASK, &set, &old);
+  ScopedBlockSignals block(nullptr);
   void *th;
   real_pthread_create(&th, nullptr, func, arg);
-  internal_sigprocmask(SIG_SETMASK, &old, nullptr);
   return th;
 }
||||
@ -1773,7 +1785,7 @@ struct __sanitizer_esr_context {
|
||||
|
||||
static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
|
||||
static const u32 kEsrMagic = 0x45535201;
|
||||
u8 *aux = ucontext->uc_mcontext.__reserved;
|
||||
u8 *aux = reinterpret_cast<u8 *>(ucontext->uc_mcontext.__reserved);
|
||||
while (true) {
|
||||
_aarch64_ctx *ctx = (_aarch64_ctx *)aux;
|
||||
if (ctx->size == 0) break;
|
||||
|
@@ -49,7 +49,17 @@ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
 uptr internal_sigaltstack(const void* ss, void* oss);
 uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
                           __sanitizer_sigset_t *oldset);
-#if SANITIZER_GLIBC
+
+void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset);
+struct ScopedBlockSignals {
+  explicit ScopedBlockSignals(__sanitizer_sigset_t *copy);
+  ~ScopedBlockSignals();
+
+ private:
+  __sanitizer_sigset_t saved_;
+};
+
+# if SANITIZER_GLIBC
 uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
 #endif
@@ -759,13 +759,9 @@ u32 GetNumberOfCPUs() {
 #elif SANITIZER_SOLARIS
   return sysconf(_SC_NPROCESSORS_ONLN);
 #else
-#if defined(CPU_COUNT)
   cpu_set_t CPUs;
   CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
   return CPU_COUNT(&CPUs);
-#else
-  return 1;
-#endif
 #endif
 }
@@ -37,7 +37,7 @@
 extern char **environ;
 #endif
 
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
 #define SANITIZER_OS_TRACE 1
 #include <os/trace.h>
 #else
@@ -70,15 +70,7 @@ extern "C" {
 #include <mach/mach_time.h>
 #include <mach/vm_statistics.h>
 #include <malloc/malloc.h>
-#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
-# include <os/log.h>
-#else
-   /* Without support for __builtin_os_log_format, fall back to the older
-      method. */
-# define OS_LOG_DEFAULT 0
-# define os_log_error(A,B,C) \
-  asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
-#endif
+#include <os/log.h>
 #include <pthread.h>
 #include <sched.h>
 #include <signal.h>
@@ -551,6 +543,9 @@ uptr TlsBaseAddr() {
   asm("movq %%gs:0,%0" : "=r"(segbase));
 #elif defined(__i386__)
   asm("movl %%gs:0,%0" : "=r"(segbase));
+#elif defined(__aarch64__)
+  asm("mrs %x0, tpidrro_el0" : "=r"(segbase));
+  segbase &= 0x07ul;  // clearing lower bits, cpu id stored there
 #endif
   return segbase;
 }
@@ -14,26 +14,6 @@
 
 #include "sanitizer_common.h"
 #include "sanitizer_platform.h"
-
-/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
-   TARGET_OS_MAC (we have no support for iOS in any form for these versions,
-   so there's no ambiguity). */
-#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
-#  define TARGET_OS_OSX 1
-#endif
-
-/* Other TARGET_OS_xxx are not present on earlier versions, define them to
-   0 (we have no support for them; they are not valid targets anyway). */
-#ifndef TARGET_OS_IOS
-#define TARGET_OS_IOS 0
-#endif
-#ifndef TARGET_OS_TV
-#define TARGET_OS_TV 0
-#endif
-#ifndef TARGET_OS_WATCH
-#define TARGET_OS_WATCH 0
-#endif
 
 #if SANITIZER_MAC
 #include "sanitizer_posix.h"
@@ -23,6 +23,7 @@
 #include <sys/mman.h>
 
 #include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
 #include "sanitizer_common/sanitizer_mac.h"
 
 // Similar code is used in Google Perftools,
@@ -192,20 +193,15 @@ void *__sanitizer_mz_malloc(malloc_zone_t *zone, uptr size) {
   return p;
 }
 
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+  static bool UseImpl() { return !COMMON_MALLOC_SANITIZER_INITIALIZED; }
+};
+
 extern "C"
 SANITIZER_INTERFACE_ATTRIBUTE
 void *__sanitizer_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
-  if (UNLIKELY(!COMMON_MALLOC_SANITIZER_INITIALIZED)) {
-    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
-    const size_t kCallocPoolSize = 1024;
-    static uptr calloc_memory_for_dlsym[kCallocPoolSize];
-    static size_t allocated;
-    size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
-    void *mem = (void*)&calloc_memory_for_dlsym[allocated];
-    allocated += size_in_words;
-    CHECK(allocated < kCallocPoolSize);
-    return mem;
-  }
+  if (DlsymAlloc::Use())
+    return DlsymAlloc::Callocate(nmemb, size);
   COMMON_MALLOC_CALLOC(nmemb, size);
   return p;
 }
@@ -223,6 +219,8 @@ extern "C"
 SANITIZER_INTERFACE_ATTRIBUTE
 void __sanitizer_mz_free(malloc_zone_t *zone, void *ptr) {
   if (!ptr) return;
+  if (DlsymAlloc::PointerIsMine(ptr))
+    return DlsymAlloc::Free(ptr);
   COMMON_MALLOC_FREE(ptr);
 }
@@ -465,9 +465,9 @@
 #define SANITIZER_INTERCEPT_STAT \
   (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \
    SI_STAT_LINUX)
-#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT___XSTAT \
-  (!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX
+#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX)
+#define SANITIZER_INTERCEPT___XSTAT \
+  ((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX)
 #define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
 #define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
 #define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
@@ -26,10 +26,7 @@
 
 // With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
 // are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
 #include <linux/posix_types.h>
 # if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
 #  include <sys/stat.h>
@@ -83,7 +83,7 @@ const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__mips__)
 const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
                                            ? FIRST_32_SECOND_64(104, 128)
-                                           : FIRST_32_SECOND_64(144, 216);
+                                           : FIRST_32_SECOND_64(160, 216);
 const unsigned struct_kernel_stat64_sz = 104;
 #elif defined(__s390__) && !defined(__s390x__)
 const unsigned struct_kernel_stat_sz = 64;
@@ -65,13 +65,23 @@ class MemoryMappedSegment {
   MemoryMappedSegmentData *data_;
 };
 
-class MemoryMappingLayout {
+class MemoryMappingLayoutBase {
+ public:
+  virtual bool Next(MemoryMappedSegment *segment) { UNIMPLEMENTED(); }
+  virtual bool Error() const { UNIMPLEMENTED(); };
+  virtual void Reset() { UNIMPLEMENTED(); }
+
+ protected:
+  ~MemoryMappingLayoutBase() {}
+};
+
+class MemoryMappingLayout final : public MemoryMappingLayoutBase {
  public:
   explicit MemoryMappingLayout(bool cache_enabled);
   ~MemoryMappingLayout();
-  bool Next(MemoryMappedSegment *segment);
-  bool Error() const;
-  void Reset();
+  virtual bool Next(MemoryMappedSegment *segment) override;
+  virtual bool Error() const override;
+  virtual void Reset() override;
   // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
   // to obtain the memory mappings. It should fall back to pre-cached data
   // instead of aborting.
@@ -86,8 +86,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
   // Nope, this does not look right either. This means the frame after next does
   // not have a valid frame pointer, but we can still extract the caller PC.
   // Unfortunately, there is no way to decide between GCC and LLVM frame
-  // layouts. Assume GCC.
-  return bp_prev - 1;
+  // layouts. Assume LLVM.
+  return bp_prev;
 #else
   return (uhwptr*)bp;
 #endif
@@ -110,21 +110,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
          IsAligned((uptr)frame, sizeof(*frame)) &&
          size < max_depth) {
 #ifdef __powerpc__
-    // PowerPC ABIs specify that the return address is saved on the
-    // *caller's* stack frame. Thus we must dereference the back chain
-    // to find the caller frame before extracting it.
+    // PowerPC ABIs specify that the return address is saved at offset
+    // 16 of the *caller's* stack frame. Thus we must dereference the
+    // back chain to find the caller frame before extracting it.
     uhwptr *caller_frame = (uhwptr*)frame[0];
     if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
         !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
       break;
-    // For most ABIs the offset where the return address is saved is two
-    // register sizes. The exception is the SVR4 ABI, which uses an
-    // offset of only one register size.
-#ifdef _CALL_SYSV
-    uhwptr pc1 = caller_frame[1];
-#else
     uhwptr pc1 = caller_frame[2];
-#endif
 #elif defined(__s390__)
     uhwptr pc1 = frame[14];
 #elif defined(__riscv)
@@ -2189,7 +2189,7 @@ void atfork_child() {
     return;
   ThreadState *thr = cur_thread();
   const uptr pc = StackTrace::GetCurrentPc();
-  ForkChildAfter(thr, pc);
+  ForkChildAfter(thr, pc, true);
  FdOnFork(thr, pc);
 }
@@ -2210,6 +2210,37 @@ TSAN_INTERCEPTOR(int, vfork, int fake) {
   return WRAP(fork)(fake);
 }
 
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
+                 void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
+  SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
+                         child_tid);
+  struct Arg {
+    int (*fn)(void *);
+    void *arg;
+  };
+  auto wrapper = +[](void *p) -> int {
+    auto *thr = cur_thread();
+    uptr pc = GET_CURRENT_PC();
+    // Start the background thread for fork, but not for clone.
+    // For fork we did this always and it's known to work (or user code has
+    // adopted). But if we do this for the new clone interceptor some code
+    // (sandbox2) fails. So model we used to do for years and don't start the
+    // background thread after clone.
+    ForkChildAfter(thr, pc, false);
+    FdOnFork(thr, pc);
+    auto *arg = static_cast<Arg *>(p);
+    return arg->fn(arg->arg);
+  };
+  ForkBefore(thr, pc);
+  Arg arg_wrapper = {fn, arg};
+  int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
+                        child_tid);
+  ForkParentAfter(thr, pc);
+  return pid;
+}
+#endif
+
 #if !SANITIZER_MAC && !SANITIZER_ANDROID
 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
                                     void *data);
|
||||
ThreadState *thr = cur_thread();
|
||||
if (pid == 0) {
|
||||
// child
|
||||
ForkChildAfter(thr, pc);
|
||||
ForkChildAfter(thr, pc, true);
|
||||
FdOnFork(thr, pc);
|
||||
} else if (pid > 0) {
|
||||
// parent
|
||||
@@ -2841,6 +2872,9 @@ void InitializeInterceptors() {
 
   TSAN_INTERCEPT(fork);
   TSAN_INTERCEPT(vfork);
+#if SANITIZER_LINUX
+  TSAN_INTERCEPT(clone);
+#endif
 #if !SANITIZER_ANDROID
   TSAN_INTERCEPT(dl_iterate_phdr);
 #endif
@@ -506,7 +506,8 @@ void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   ctx->thread_registry.Unlock();
 }
 
-void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkChildAfter(ThreadState *thr, uptr pc,
+                    bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   ScopedErrorReportLock::Unlock();
|
||||
VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
|
||||
" parent had %d threads\n", (int)internal_getpid(), (int)nthread);
|
||||
if (nthread == 1) {
|
||||
StartBackgroundThread();
|
||||
if (start_thread)
|
||||
StartBackgroundThread();
|
||||
} else {
|
||||
// We've just forked a multi-threaded process. We cannot reasonably function
|
||||
// after that (some mutexes may be locked before fork). So just enable
|
||||
|
@@ -440,7 +440,7 @@ void InitializeDynamicAnnotations();
 
 void ForkBefore(ThreadState *thr, uptr pc);
 void ForkParentAfter(ThreadState *thr, uptr pc);
-void ForkChildAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
 
 void ReportRace(ThreadState *thr);
 bool OutputReport(ThreadState *thr, const ScopedReport &srep);
@@ -42,6 +42,25 @@ ASM_SYMBOL(__tsan_trace_switch_thunk):
   push %r11
   CFI_ADJUST_CFA_OFFSET(8)
   CFI_REL_OFFSET(%r11, 0)
+  # All XMM registers are caller-saved.
+  sub $0x100, %rsp
+  CFI_ADJUST_CFA_OFFSET(0x100)
+  vmovdqu %xmm0, 0x0(%rsp)
+  vmovdqu %xmm1, 0x10(%rsp)
+  vmovdqu %xmm2, 0x20(%rsp)
+  vmovdqu %xmm3, 0x30(%rsp)
+  vmovdqu %xmm4, 0x40(%rsp)
+  vmovdqu %xmm5, 0x50(%rsp)
+  vmovdqu %xmm6, 0x60(%rsp)
+  vmovdqu %xmm7, 0x70(%rsp)
+  vmovdqu %xmm8, 0x80(%rsp)
+  vmovdqu %xmm9, 0x90(%rsp)
+  vmovdqu %xmm10, 0xa0(%rsp)
+  vmovdqu %xmm11, 0xb0(%rsp)
+  vmovdqu %xmm12, 0xc0(%rsp)
+  vmovdqu %xmm13, 0xd0(%rsp)
+  vmovdqu %xmm14, 0xe0(%rsp)
+  vmovdqu %xmm15, 0xf0(%rsp)
   # Align stack frame.
   push %rbx  # non-scratch
   CFI_ADJUST_CFA_OFFSET(8)
@@ -59,6 +78,24 @@ ASM_SYMBOL(__tsan_trace_switch_thunk):
   pop %rbx
   CFI_ADJUST_CFA_OFFSET(-8)
+  # Restore scratch registers.
+  vmovdqu 0x0(%rsp), %xmm0
+  vmovdqu 0x10(%rsp), %xmm1
+  vmovdqu 0x20(%rsp), %xmm2
+  vmovdqu 0x30(%rsp), %xmm3
+  vmovdqu 0x40(%rsp), %xmm4
+  vmovdqu 0x50(%rsp), %xmm5
+  vmovdqu 0x60(%rsp), %xmm6
+  vmovdqu 0x70(%rsp), %xmm7
+  vmovdqu 0x80(%rsp), %xmm8
+  vmovdqu 0x90(%rsp), %xmm9
+  vmovdqu 0xa0(%rsp), %xmm10
+  vmovdqu 0xb0(%rsp), %xmm11
+  vmovdqu 0xc0(%rsp), %xmm12
+  vmovdqu 0xd0(%rsp), %xmm13
+  vmovdqu 0xe0(%rsp), %xmm14
+  vmovdqu 0xf0(%rsp), %xmm15
+  add $0x100, %rsp
+  CFI_ADJUST_CFA_OFFSET(-0x100)
   pop %r11
   CFI_ADJUST_CFA_OFFSET(-8)
   pop %r10
@@ -123,6 +160,25 @@ ASM_SYMBOL(__tsan_report_race_thunk):
   push %r11
   CFI_ADJUST_CFA_OFFSET(8)
   CFI_REL_OFFSET(%r11, 0)
+  # All XMM registers are caller-saved.
+  sub $0x100, %rsp
+  CFI_ADJUST_CFA_OFFSET(0x100)
+  vmovdqu %xmm0, 0x0(%rsp)
+  vmovdqu %xmm1, 0x10(%rsp)
+  vmovdqu %xmm2, 0x20(%rsp)
+  vmovdqu %xmm3, 0x30(%rsp)
+  vmovdqu %xmm4, 0x40(%rsp)
+  vmovdqu %xmm5, 0x50(%rsp)
+  vmovdqu %xmm6, 0x60(%rsp)
+  vmovdqu %xmm7, 0x70(%rsp)
+  vmovdqu %xmm8, 0x80(%rsp)
+  vmovdqu %xmm9, 0x90(%rsp)
+  vmovdqu %xmm10, 0xa0(%rsp)
+  vmovdqu %xmm11, 0xb0(%rsp)
+  vmovdqu %xmm12, 0xc0(%rsp)
+  vmovdqu %xmm13, 0xd0(%rsp)
+  vmovdqu %xmm14, 0xe0(%rsp)
+  vmovdqu %xmm15, 0xf0(%rsp)
   # Align stack frame.
   push %rbx  # non-scratch
   CFI_ADJUST_CFA_OFFSET(8)
@@ -140,6 +196,24 @@ ASM_SYMBOL(__tsan_report_race_thunk):
   pop %rbx
   CFI_ADJUST_CFA_OFFSET(-8)
+  # Restore scratch registers.
+  vmovdqu 0x0(%rsp), %xmm0
+  vmovdqu 0x10(%rsp), %xmm1
+  vmovdqu 0x20(%rsp), %xmm2
+  vmovdqu 0x30(%rsp), %xmm3
+  vmovdqu 0x40(%rsp), %xmm4
+  vmovdqu 0x50(%rsp), %xmm5
+  vmovdqu 0x60(%rsp), %xmm6
+  vmovdqu 0x70(%rsp), %xmm7
+  vmovdqu 0x80(%rsp), %xmm8
+  vmovdqu 0x90(%rsp), %xmm9
+  vmovdqu 0xa0(%rsp), %xmm10
+  vmovdqu 0xb0(%rsp), %xmm11
+  vmovdqu 0xc0(%rsp), %xmm12
+  vmovdqu 0xd0(%rsp), %xmm13
+  vmovdqu 0xe0(%rsp), %xmm14
+  vmovdqu 0xf0(%rsp), %xmm15
+  add $0x100, %rsp
+  CFI_ADJUST_CFA_OFFSET(-0x100)
   pop %r11
   CFI_ADJUST_CFA_OFFSET(-8)
   pop %r10
@@ -1,6 +1,5 @@
 #include "tsan_ppc_regs.h"
 
-        .machine altivec
         .section .text
         .hidden __tsan_setjmp
         .globl _setjmp
@@ -50,7 +50,6 @@ void InitializeFlags() {
   {
     CommonFlags cf;
     cf.CopyFrom(*common_flags());
-    cf.print_summary = false;
     cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
     OverrideCommonFlags(cf);
   }
@@ -894,21 +894,6 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
 
 }  // namespace __ubsan
 
-void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
-                                           ValueHandle Function) {
-  GET_REPORT_OPTIONS(false);
-  CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
-  handleCFIBadIcall(&Data, Function, Opts);
-}
-
-void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
-                                                 ValueHandle Function) {
-  GET_REPORT_OPTIONS(true);
-  CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
-  handleCFIBadIcall(&Data, Function, Opts);
-  Die();
-}
-
 void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
                                             ValueHandle Value,
                                             uptr ValidVtable) {
@@ -215,20 +215,12 @@ enum CFITypeCheckKind : unsigned char {
   CFITCK_VMFCall,
 };
 
-struct CFIBadIcallData {
-  SourceLocation Loc;
-  const TypeDescriptor &Type;
-};
-
 struct CFICheckFailData {
   CFITypeCheckKind CheckKind;
   SourceLocation Loc;
   const TypeDescriptor &Type;
 };
 
-/// \brief Handle control flow integrity failure for indirect function calls.
-RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
-
 /// \brief Handle control flow integrity failures.
 RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
             uptr VtableIsValid)
@@ -12,7 +12,6 @@
 #ifndef UBSAN_PLATFORM_H
 #define UBSAN_PLATFORM_H
 
-#ifndef CAN_SANITIZE_UB
 // Other platforms should be easy to add, and probably work as-is.
 #if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
     defined(__NetBSD__) || defined(__DragonFly__) || \
@@ -22,6 +21,5 @@
 #else
 # define CAN_SANITIZE_UB 0
 #endif
-#endif  // CAN_SANITIZE_UB
 
 #endif