libsanitizer: Merge with upstream
Merged revision: fdf4c035225de52f596899931b1f6100e5e3e928
This commit is contained in:
parent 881d1689a4
commit 2e3d50c095
@@ -1,4 +1,4 @@
1c2e5fd66ea27d0c51360ba4e22099124a915562
fdf4c035225de52f596899931b1f6100e5e3e928
The first line of this file holds the git revision number of the
last merge done from the master library sources.
@@ -908,13 +908,6 @@ AllocType AsanChunkView::GetAllocType() const {
return (AllocType)chunk_->alloc_type;
}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
StackTrace res = StackDepotGet(id);
CHECK(res.trace);
return res;
}
u32 AsanChunkView::GetAllocStackId() const {
u32 tid = 0;
u32 stack = 0;
@@ -931,14 +924,6 @@ u32 AsanChunkView::GetFreeStackId() const {
return stack;
}
StackTrace AsanChunkView::GetAllocStack() const {
return GetStackTraceFromId(GetAllocStackId());
}
StackTrace AsanChunkView::GetFreeStack() const {
return GetStackTraceFromId(GetFreeStackId());
}
void InitializeAllocator(const AllocatorOptions &options) {
instance.InitLinkerInitialized(options);
}
@@ -64,8 +64,6 @@ class AsanChunkView {
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
u32 GetAllocStackId() const;
u32 GetFreeStackId() const;
StackTrace GetAllocStack() const;
StackTrace GetFreeStack() const;
AllocType GetAllocType() const;
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
if (addr >= Beg() && (addr + access_size) <= End()) {
@@ -19,6 +19,7 @@
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace {
using namespace __asan;
@@ -54,11 +55,11 @@ uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
StackTrace stack(nullptr, 0);
if (alloc_stack) {
if (chunk.AllocTid() == kInvalidTid) return 0;
stack = chunk.GetAllocStack();
stack = StackDepotGet(chunk.GetAllocStackId());
if (thread_id) *thread_id = chunk.AllocTid();
} else {
if (chunk.FreeTid() == kInvalidTid) return 0;
stack = chunk.GetFreeStack();
stack = StackDepotGet(chunk.GetFreeStackId());
if (thread_id) *thread_id = chunk.FreeTid();
}
@@ -154,6 +154,23 @@ static void CheckODRViolationViaIndicator(const Global *g) {
}
}
// Check ODR violation for given global G by checking if it's already poisoned.
// We use this method in case compiler doesn't use private aliases for global
// variables.
static void CheckODRViolationViaPoisoning(const Global *g) {
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger
// the entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}
}
// Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to
@@ -199,6 +216,8 @@ static void RegisterGlobal(const Global *g) {
// where two globals with the same name are defined in different modules.
if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g);
else
CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);
@@ -81,12 +81,7 @@ void InitializePlatformInterceptors();
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
!SANITIZER_NETBSD
# define ASAN_INTERCEPT___CXA_THROW 1
# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
|| ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# else
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
# endif
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
# else
@@ -165,7 +165,7 @@ static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
@@ -124,9 +124,9 @@ static void PrintAccumulatedStats() {
// Use lock to keep reports from mixing up.
Lock lock(&print_lock);
stats.Print();
StackDepotStats *stack_depot_stats = StackDepotGetStats();
StackDepotStats stack_depot_stats = StackDepotGetStats();
Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
PrintInternalAllocatorStats();
}
@@ -141,7 +141,7 @@ static void CheckUnwind() {
static void HwasanFormatMemoryUsage(InternalScopedString &s) {
HwasanThreadList &thread_list = hwasanThreadList();
auto thread_stats = thread_list.GetThreadStats();
auto *sds = StackDepotGetStats();
auto sds = StackDepotGetStats();
AllocatorStatCounters asc;
GetAllocatorStats(asc);
s.append(
@@ -151,7 +151,7 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) {
internal_getpid(), GetRSS(), thread_stats.n_live_threads,
thread_stats.total_stack_size,
thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(),
sds->allocated, sds->n_uniq_ids, asc[AllocatorStatMapped]);
sds.allocated, sds.n_uniq_ids, asc[AllocatorStatMapped]);
}
#if SANITIZER_ANDROID
@@ -35,6 +35,7 @@
ASM_TYPE_FUNCTION(__interceptor_setjmp)
__interceptor_setjmp:
CFI_STARTPROC
_CET_ENDBR
xorl %esi, %esi
jmp __interceptor_sigsetjmp
CFI_ENDPROC
@@ -44,6 +45,7 @@ ASM_SIZE(__interceptor_setjmp)
ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
__interceptor_sigsetjmp:
CFI_STARTPROC
_CET_ENDBR
// Save callee save registers.
mov %rbx, (0*8)(%rdi)
@@ -79,7 +79,8 @@ class LeakSuppressionContext {
int suppression_types_num)
: context(supprression_types, suppression_types_num) {}
Suppression *GetSuppressionForStack(u32 stack_trace_id);
Suppression *GetSuppressionForStack(u32 stack_trace_id,
const StackTrace &stack);
const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
if (!suppressed_stacks_sorted) {
@@ -477,9 +478,7 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
}
}
static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
CHECK(stack_id);
StackTrace stack = map->Get(stack_id);
static uptr GetCallerPC(const StackTrace &stack) {
// The top frame is our malloc/calloc/etc. The next frame is the caller.
if (stack.size >= 2)
return stack.trace[1];
@@ -488,7 +487,7 @@ static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
struct InvalidPCParam {
Frontier *frontier;
StackDepotReverseMap *stack_depot_reverse_map;
const StackDepotReverseMap *stack_depot;
bool skip_linker_allocations;
};
@@ -503,7 +502,7 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
u32 stack_id = m.stack_trace_id();
uptr caller_pc = 0;
if (stack_id > 0)
caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
caller_pc = GetCallerPC(param->stack_depot->Get(stack_id));
// If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
// it as reachable, as we can't properly report its allocation stack anyway.
if (caller_pc == 0 || (param->skip_linker_allocations &&
@@ -534,11 +533,11 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
void ProcessPC(Frontier *frontier) {
StackDepotReverseMap stack_depot_reverse_map;
static void ProcessPC(Frontier *frontier,
const StackDepotReverseMap &stack_depot) {
InvalidPCParam arg;
arg.frontier = frontier;
arg.stack_depot_reverse_map = &stack_depot_reverse_map;
arg.stack_depot = &stack_depot;
arg.skip_linker_allocations =
flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
ForEachChunk(MarkInvalidPCCb, &arg);
@@ -546,6 +545,7 @@ void ProcessPC(Frontier *frontier) {
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
const StackDepotReverseMap &stack_depot,
Frontier *frontier) {
const InternalMmapVector<u32> &suppressed_stacks =
GetSuppressionContext()->GetSortedSuppressedStacks();
@@ -560,7 +560,7 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
FloodFillTag(frontier, kReachable);
CHECK_EQ(0, frontier->size());
ProcessPC(frontier);
ProcessPC(frontier, stack_depot);
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
@@ -584,11 +584,6 @@ static void ResetTagsCb(uptr chunk, void *arg) {
m.set_tag(kDirectlyLeaked);
}
static void PrintStackTraceById(u32 stack_trace_id) {
CHECK(stack_trace_id);
StackDepotGet(stack_trace_id).Print();
}
// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
@@ -598,16 +593,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
LsanMetadata m(chunk);
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
u32 resolution = flags()->resolution;
u32 stack_trace_id = 0;
if (resolution > 0) {
StackTrace stack = StackDepotGet(m.stack_trace_id());
stack.size = Min(stack.size, resolution);
stack_trace_id = StackDepotPut(stack);
} else {
stack_trace_id = m.stack_trace_id();
}
leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
m.tag());
}
}
@@ -668,7 +654,8 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
CHECK(param);
CHECK(!param->success);
ReportUnsuspendedThreads(suspended_threads);
ClassifyAllChunks(suspended_threads, &param->frontier);
ClassifyAllChunks(suspended_threads, param->leak_report.stack_depot(),
&param->frontier);
ForEachChunk(CollectLeaksCb, &param->leak_report);
// Clean up for subsequent leak checks. This assumes we did not overwrite any
// kIgnored tags.
@@ -780,9 +767,8 @@ Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
}
Suppression *LeakSuppressionContext::GetSuppressionForStack(
u32 stack_trace_id) {
u32 stack_trace_id, const StackTrace &stack) {
LazyInit();
StackTrace stack = StackDepotGet(stack_trace_id);
for (uptr i = 0; i < stack.size; i++) {
Suppression *s = GetSuppressionForAddr(
StackTrace::GetPreviousInstructionPc(stack.trace[i]));
@@ -807,6 +793,13 @@ const uptr kMaxLeaksConsidered = 5000;
void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
uptr leaked_size, ChunkTag tag) {
CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
if (u32 resolution = flags()->resolution) {
StackTrace stack = stack_depot_.Get(stack_trace_id);
stack.size = Min(stack.size, resolution);
stack_trace_id = StackDepotPut(stack);
}
bool is_directly_leaked = (tag == kDirectlyLeaked);
uptr i;
for (i = 0; i < leaks_.size(); i++) {
@@ -869,7 +862,8 @@ void LeakReport::PrintReportForLeak(uptr index) {
leaks_[index].total_size, leaks_[index].hit_count);
Printf("%s", d.Default());
PrintStackTraceById(leaks_[index].stack_trace_id);
CHECK(leaks_[index].stack_trace_id);
stack_depot_.Get(leaks_[index].stack_trace_id).Print();
if (flags()->report_objects) {
Printf("Objects leaked above:\n");
@@ -905,8 +899,8 @@ uptr LeakReport::ApplySuppressions() {
LeakSuppressionContext *suppressions = GetSuppressionContext();
uptr new_suppressions = false;
for (uptr i = 0; i < leaks_.size(); i++) {
Suppression *s =
suppressions->GetSuppressionForStack(leaks_[i].stack_trace_id);
Suppression *s = suppressions->GetSuppressionForStack(
leaks_[i].stack_trace_id, stack_depot_.Get(leaks_[i].stack_trace_id));
if (s) {
s->weight += leaks_[i].total_size;
atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
@@ -18,6 +18,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
@@ -107,12 +108,14 @@ class LeakReport {
uptr ApplySuppressions();
uptr UnsuppressedLeakCount();
uptr IndirectUnsuppressedLeakCount();
const StackDepotReverseMap &stack_depot() { return stack_depot_; }
private:
void PrintReportForLeak(uptr index);
void PrintLeakedObjectsForLeak(uptr index);
u32 next_id_ = 0;
StackDepotReverseMap stack_depot_;
InternalMmapVector<Leak> leaks_;
InternalMmapVector<LeakedObject> leaked_objects_;
};
@@ -66,3 +66,7 @@
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif
#if defined(__x86_64__) || defined(__i386__)
#include <cet.h>
#endif
@@ -14,7 +14,7 @@
namespace __sanitizer {
bool ChainedOriginDepot::ChainedOriginDepotNode::eq(
u32 hash, const args_type &args) const {
hash_type hash, const args_type &args) const {
return here_id == args.here_id && prev_id == args.prev_id;
}
@@ -36,7 +36,8 @@ uptr ChainedOriginDepot::ChainedOriginDepotNode::storage_size(
split, or one of two reserved values (-1) or (-2). Either case can
dominate depending on the workload.
*/
u32 ChainedOriginDepot::ChainedOriginDepotNode::hash(const args_type &args) {
ChainedOriginDepot::ChainedOriginDepotNode::hash_type
ChainedOriginDepot::ChainedOriginDepotNode::hash(const args_type &args) {
const u32 m = 0x5bd1e995;
const u32 seed = 0x9747b28c;
const u32 r = 24;
@@ -67,7 +68,7 @@ bool ChainedOriginDepot::ChainedOriginDepotNode::is_valid(
}
void ChainedOriginDepot::ChainedOriginDepotNode::store(const args_type &args,
u32 other_hash) {
hash_type other_hash) {
here_id = args.here_id;
prev_id = args.prev_id;
}
@@ -85,7 +86,9 @@ ChainedOriginDepot::ChainedOriginDepotNode::get_handle() {
ChainedOriginDepot::ChainedOriginDepot() {}
StackDepotStats *ChainedOriginDepot::GetStats() { return depot.GetStats(); }
StackDepotStats ChainedOriginDepot::GetStats() const {
return depot.GetStats();
}
bool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {
ChainedOriginDepotDesc desc = {here_id, prev_id};
@@ -22,7 +22,7 @@ class ChainedOriginDepot {
ChainedOriginDepot();
// Gets the statistic of the origin chain storage.
StackDepotStats *GetStats();
StackDepotStats GetStats() const;
// Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
// If successful, returns true and the new chain id new_id.
@@ -43,6 +43,7 @@ class ChainedOriginDepot {
};
struct ChainedOriginDepotNode {
using hash_type = u32;
ChainedOriginDepotNode *link;
u32 id;
u32 here_id;
@@ -50,15 +51,15 @@ class ChainedOriginDepot {
typedef ChainedOriginDepotDesc args_type;
bool eq(u32 hash, const args_type &args) const;
bool eq(hash_type hash, const args_type &args) const;
static uptr storage_size(const args_type &args);
static u32 hash(const args_type &args);
static hash_type hash(const args_type &args);
static bool is_valid(const args_type &args);
void store(const args_type &args, u32 other_hash);
void store(const args_type &args, hash_type other_hash);
args_type load() const;
@@ -6,6 +6,7 @@
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
_CET_ENDBR
// Store return address in the spill area and tear down the stack frame.
sub $12, %esp
call COMMON_INTERCEPTOR_SPILL_AREA
@@ -6,6 +6,7 @@
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
_CET_ENDBR
// Store return address in the spill area and tear down the stack frame.
push %rcx
call COMMON_INTERCEPTOR_SPILL_AREA
@@ -26,9 +26,7 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() {
return nullptr;
}
SANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; }
void *BackgroundThread(void *arg) {
const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
@@ -48,16 +46,12 @@ void *BackgroundThread(void *arg) {
prev_reported_rss = current_rss_mb;
}
// If stack depot has grown 10% since last time, print it too.
StackDepotStats *stack_depot_stats = StackDepotGetStats();
if (stack_depot_stats) {
if (prev_reported_stack_depot_size * 11 / 10 <
stack_depot_stats->allocated) {
Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
SanitizerToolName,
stack_depot_stats->n_uniq_ids,
stack_depot_stats->allocated >> 20);
prev_reported_stack_depot_size = stack_depot_stats->allocated;
}
StackDepotStats stack_depot_stats = StackDepotGetStats();
if (prev_reported_stack_depot_size * 11 / 10 <
stack_depot_stats.allocated) {
Printf("%s: StackDepot: %zd ids; %zdM allocated\n", SanitizerToolName,
stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
prev_reported_stack_depot_size = stack_depot_stats.allocated;
}
}
// Check RSS against the limit.
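The hunk above (and several later ones) applies a single upstream API change: StackDepotGetStats() used to return a StackDepotStats* that callers had to null-check, and now returns the struct by value, with the weak default returning a zero-initialized value. A minimal caller-side sketch of the new shape, using illustrative stand-in types rather than the real sanitizer_common declarations:

#include <cstddef>
#include <cstdio>

// Illustrative stand-ins for __sanitizer::StackDepotStats and the depot API.
struct StackDepotStats {
  size_t n_uniq_ids;
  size_t allocated;
};

// By-value return; a weak default can simply return {} instead of nullptr.
StackDepotStats StackDepotGetStats() { return {42, 5u << 20}; }

int main() {
  StackDepotStats stats = StackDepotGetStats();  // no null check needed
  printf("StackDepot: %zu ids; %zuM allocated\n", stats.n_uniq_ids,
         stats.allocated >> 20);
  return 0;
}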
@@ -38,6 +38,30 @@ class MurMur2HashBuilder {
return x;
}
};
class MurMur2Hash64Builder {
static const u64 m = 0xc6a4a7935bd1e995ull;
static const u64 seed = 0x9747b28c9747b28cull;
static const u64 r = 47;
u64 h;
public:
explicit MurMur2Hash64Builder(u64 init = 0) { h = seed ^ (init * m); }
void add(u64 k) {
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
u64 get() {
u64 x = h;
x ^= x >> r;
x *= m;
x ^= x >> r;
return x;
}
};
} //namespace __sanitizer
#endif  // SANITIZER_HASH_H
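The hunk above adds a 64-bit MurMur2 accumulator next to the existing 32-bit one. A minimal standalone usage sketch; the typedef, test values, and main() harness are illustrative and not part of the library:

#include <cstdio>

typedef unsigned long long u64;  // stand-in for __sanitizer::u64

// Same accumulator as the class added above, copied into a standalone harness.
class MurMur2Hash64Builder {
  static const u64 m = 0xc6a4a7935bd1e995ull;
  static const u64 seed = 0x9747b28c9747b28cull;
  static const u64 r = 47;
  u64 h;

 public:
  explicit MurMur2Hash64Builder(u64 init = 0) { h = seed ^ (init * m); }
  void add(u64 k) {
    k *= m;
    k ^= k >> r;
    k *= m;
    h ^= k;
    h *= m;
  }
  u64 get() {
    u64 x = h;
    x ^= x >> r;
    x *= m;
    x ^= x >> r;
    return x;
  }
};

int main() {
  // Hash a short sequence of values, e.g. the PCs of a stack trace.
  u64 pcs[] = {0x401000, 0x401234, 0x7f0012345678};
  MurMur2Hash64Builder builder(sizeof(pcs) / sizeof(pcs[0]));
  for (u64 pc : pcs) builder.add(pc);
  printf("hash = %llx\n", builder.get());
  return 0;
}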
@@ -759,13 +759,9 @@ u32 GetNumberOfCPUs() {
#elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
#else
#if defined(CPU_COUNT)
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
#else
return 1;
#endif
#endif
}
@@ -37,7 +37,7 @@
extern char **environ;
#endif
#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
#if defined(__has_include) && __has_include(<os/trace.h>)
#define SANITIZER_OS_TRACE 1
#include <os/trace.h>
#else
@@ -70,15 +70,7 @@ extern "C" {
#include <mach/mach_time.h>
#include <mach/vm_statistics.h>
#include <malloc/malloc.h>
#if defined(__has_builtin) && __has_builtin(__builtin_os_log_format)
# include <os/log.h>
#else
/* Without support for __builtin_os_log_format, fall back to the older
method. */
# define OS_LOG_DEFAULT 0
# define os_log_error(A,B,C) \
asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", (C));
#endif
#include <os/log.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
@@ -14,26 +14,6 @@
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
/* TARGET_OS_OSX is not present in SDKs before Darwin16 (macOS 10.12) use
TARGET_OS_MAC (we have no support for iOS in any form for these versions,
so there's no ambiguity). */
#if !defined(TARGET_OS_OSX) && TARGET_OS_MAC
# define TARGET_OS_OSX 1
#endif
/* Other TARGET_OS_xxx are not present on earlier versions, define them to
0 (we have no support for them; they are not valid targets anyway). */
#ifndef TARGET_OS_IOS
#define TARGET_OS_IOS 0
#endif
#ifndef TARGET_OS_TV
#define TARGET_OS_TV 0
#endif
#ifndef TARGET_OS_WATCH
#define TARGET_OS_WATCH 0
#endif
#if SANITIZER_MAC
#include "sanitizer_posix.h"
@@ -548,10 +548,10 @@
#define SANITIZER_INTERCEPT_SHA1 SI_NETBSD
#define SANITIZER_INTERCEPT_MD4 SI_NETBSD
#define SANITIZER_INTERCEPT_RMD160 SI_NETBSD
#define SANITIZER_INTERCEPT_MD5 SI_NETBSD
#define SANITIZER_INTERCEPT_MD5 (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_MD2 SI_NETBSD
#define SANITIZER_INTERCEPT_SHA2 SI_NETBSD
#define SANITIZER_INTERCEPT_SHA2 (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_CDB SI_NETBSD
#define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_POPEN SI_POSIX
@@ -69,6 +69,11 @@
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
#include <md5.h>
#include <sha224.h>
#include <sha256.h>
#include <sha384.h>
#include <sha512.h>
#include <stdio.h>
#include <stringlist.h>
#include <term.h>
@@ -361,6 +366,22 @@ const int si_SEGV_MAPERR = SEGV_MAPERR;
const int si_SEGV_ACCERR = SEGV_ACCERR;
const int unvis_valid = UNVIS_VALID;
const int unvis_validpush = UNVIS_VALIDPUSH;
const unsigned MD5_CTX_sz = sizeof(MD5_CTX);
const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;
#define SHA2_CONST(LEN) \
const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX); \
const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \
const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH; \
const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH
SHA2_CONST(224);
SHA2_CONST(256);
SHA2_CONST(384);
SHA2_CONST(512);
#undef SHA2_CONST
} // namespace __sanitizer
using namespace __sanitizer;
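For readability, this is the mechanical preprocessor expansion of one instantiation of the SHA2_CONST macro in the hunk above; SHA256_CTX and the *_LENGTH macros come from the FreeBSD <sha256.h> header included earlier in the same hunk:

#include <sha256.h>  // FreeBSD libmd header, as included above

// SHA2_CONST(256); expands to:
const unsigned SHA256_CTX_sz = sizeof(SHA256_CTX);
const unsigned SHA256_return_length = SHA256_DIGEST_STRING_LENGTH;
const unsigned SHA256_block_length = SHA256_BLOCK_LENGTH;
const unsigned SHA256_digest_length = SHA256_DIGEST_LENGTH;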
@@ -647,6 +647,22 @@ extern unsigned IOCTL_KDSKBMODE;
extern const int si_SEGV_MAPERR;
extern const int si_SEGV_ACCERR;
extern const unsigned MD5_CTX_sz;
extern const unsigned MD5_return_length;
#define SHA2_EXTERN(LEN) \
extern const unsigned SHA##LEN##_CTX_sz; \
extern const unsigned SHA##LEN##_return_length; \
extern const unsigned SHA##LEN##_block_length; \
extern const unsigned SHA##LEN##_digest_length
SHA2_EXTERN(224);
SHA2_EXTERN(256);
SHA2_EXTERN(384);
SHA2_EXTERN(512);
#undef SHA2_EXTERN
struct __sanitizer_cap_rights {
u64 cr_rights[2];
};
@@ -26,10 +26,7 @@
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
// Also, for some platforms (e.g. mips) there are additional members in the
// <sys/stat.h> struct stat:s.
// fine with newer headers, too.
#include <linux/posix_types.h>
# if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
# include <sys/stat.h>
@@ -83,7 +83,7 @@ const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__mips__)
const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
? FIRST_32_SECOND_64(104, 128)
: FIRST_32_SECOND_64(144, 216);
: FIRST_32_SECOND_64(160, 216);
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
@@ -19,26 +19,24 @@
namespace __sanitizer {
struct StackDepotNode {
using hash_type = u32;
StackDepotNode *link;
u32 id;
atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
hash_type stack_hash;
u32 size;
u32 tag;
atomic_uint32_t tag_and_use_count; // tag : 12 high bits; use_count : 20;
uptr stack[1];  // [size]
static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
// Lower kTabSizeLog bits are equal for all items in one bucket.
// We use these bits to store the per-stack use counter.
static const u32 kUseCountBits = kTabSizeLog;
static const u32 kUseCountBits = 20;
static const u32 kMaxUseCount = 1 << kUseCountBits;
static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
static const u32 kHashMask = ~kUseCountMask;
typedef StackTrace args_type;
bool eq(u32 hash, const args_type &args) const {
u32 hash_bits =
atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
bool eq(hash_type hash, const args_type &args) const {
u32 tag =
atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
if (stack_hash != hash || args.size != size || args.tag != tag)
return false;
uptr i = 0;
for (; i < size; i++) {
@@ -49,7 +47,7 @@ struct StackDepotNode {
static uptr storage_size(const args_type &args) {
return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
}
static u32 hash(const args_type &args) {
static hash_type hash(const args_type &args) {
MurMur2HashBuilder H(args.size * sizeof(uptr));
for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
return H.get();
@@ -57,13 +55,17 @@ struct StackDepotNode {
static bool is_valid(const args_type &args) {
return args.size > 0 && args.trace;
}
void store(const args_type &args, u32 hash) {
atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
void store(const args_type &args, hash_type hash) {
CHECK_EQ(args.tag & (~kUseCountMask >> kUseCountBits), args.tag);
atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
memory_order_relaxed);
stack_hash = hash;
size = args.size;
tag = args.tag;
internal_memcpy(stack, args.trace, size * sizeof(uptr));
}
args_type load() const {
u32 tag =
atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
return args_type(&stack[0], size, tag);
}
StackDepotHandle get_handle() { return StackDepotHandle(this); }
@@ -71,16 +73,16 @@ struct StackDepotNode {
typedef StackDepotHandle handle_type;
};
COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
COMPILER_CHECK(StackDepotNode::kMaxUseCount >= (u32)kStackDepotMaxUseCount);
u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
return atomic_load(&node_->tag_and_use_count, memory_order_relaxed) &
StackDepotNode::kUseCountMask;
}
void StackDepotHandle::inc_use_count_unsafe() {
u32 prev =
atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
atomic_fetch_add(&node_->tag_and_use_count, 1, memory_order_relaxed) &
StackDepotNode::kUseCountMask;
CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
@@ -90,9 +92,7 @@ typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
StackDepot;
static StackDepot theDepot;
StackDepotStats *StackDepotGetStats() {
return theDepot.GetStats();
}
StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
u32 StackDepotPut(StackTrace stack) {
StackDepotHandle h = theDepot.Put(stack);
@@ -127,8 +127,10 @@ bool StackDepotReverseMap::IdDescPair::IdComparator(
return a.id < b.id;
}
StackDepotReverseMap::StackDepotReverseMap() {
map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
void StackDepotReverseMap::Init() const {
if (LIKELY(map_.capacity()))
return;
map_.reserve(StackDepotGetStats().n_uniq_ids + 100);
for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
atomic_uintptr_t *p = &theDepot.tab[idx];
uptr v = atomic_load(p, memory_order_consume);
@@ -141,7 +143,8 @@ StackDepotReverseMap::StackDepotReverseMap() {
Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
}
StackTrace StackDepotReverseMap::Get(u32 id) {
StackTrace StackDepotReverseMap::Get(u32 id) const {
Init();
if (!map_.size())
return StackTrace();
IdDescPair pair = {id, nullptr};
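A small worked example of the bit packing used by the new tag_and_use_count field in the hunks above: the low 20 bits hold the per-stack use counter and the remaining 12 high bits hold the tag. The constants mirror the ones in the diff; the harness itself is illustrative:

#include <cassert>
#include <cstdint>

int main() {
  // Same split as kUseCountBits / kUseCountMask above.
  const uint32_t kUseCountBits = 20;
  const uint32_t kUseCountMask = (1u << kUseCountBits) - 1;

  uint32_t tag = 5;        // must fit in the 12 high bits
  uint32_t use_count = 3;  // must stay below 1 << 20

  // Packing mirrors StackDepotNode::store() plus inc_use_count_unsafe().
  uint32_t tag_and_use_count = (tag << kUseCountBits) | use_count;

  // Unpacking mirrors load() and StackDepotHandle::use_count().
  assert((tag_and_use_count >> kUseCountBits) == tag);
  assert((tag_and_use_count & kUseCountMask) == use_count);
  return 0;
}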
@@ -33,7 +33,7 @@ struct StackDepotHandle {
const int kStackDepotMaxUseCount = 1U << (SANITIZER_ANDROID ? 16 : 20);
StackDepotStats *StackDepotGetStats();
StackDepotStats StackDepotGetStats();
u32 StackDepotPut(StackTrace stack);
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
// Retrieves a stored stack trace by the id.
@@ -49,8 +49,8 @@ void StackDepotPrintAll();
// which were stored before it was instantiated.
class StackDepotReverseMap {
public:
StackDepotReverseMap();
StackTrace Get(u32 id);
StackDepotReverseMap() = default;
StackTrace Get(u32 id) const;
private:
struct IdDescPair {
@@ -60,7 +60,9 @@ class StackDepotReverseMap {
static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
};
InternalMmapVector<IdDescPair> map_;
void Init() const;
mutable InternalMmapVector<IdDescPair> map_;
// Disallow evil constructors.
StackDepotReverseMap(const StackDepotReverseMap&);
@@ -27,19 +27,20 @@ class StackDepotBase {
public:
typedef typename Node::args_type args_type;
typedef typename Node::handle_type handle_type;
typedef typename Node::hash_type hash_type;
// Maps stack trace to an unique id.
handle_type Put(args_type args, bool *inserted = nullptr);
// Retrieves a stored stack trace by the id.
args_type Get(u32 id);
StackDepotStats *GetStats() { return &stats; }
StackDepotStats GetStats() const { return stats; }
void LockAll();
void UnlockAll();
void PrintAll();
private:
static Node *find(Node *s, args_type args, u32 hash);
static Node *find(Node *s, args_type args, hash_type hash);
static Node *lock(atomic_uintptr_t *p);
static void unlock(atomic_uintptr_t *p, Node *s);
@@ -62,7 +63,7 @@ class StackDepotBase {
template <class Node, int kReservedBits, int kTabSizeLog>
Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
args_type args,
u32 hash) {
hash_type hash) {
// Searches linked list s for the stack, returns its id.
for (; s; s = s->link) {
if (s->eq(hash, args)) {
@@ -101,7 +102,7 @@ StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
bool *inserted) {
if (inserted) *inserted = false;
if (!Node::is_valid(args)) return handle_type();
uptr h = Node::hash(args);
hash_type h = Node::hash(args);
atomic_uintptr_t *p = &tab[h % kTabSize];
uptr v = atomic_load(p, memory_order_consume);
Node *s = (Node *)(v & ~1);
@@ -86,8 +86,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
// Nope, this does not look right either. This means the frame after next does
// not have a valid frame pointer, but we can still extract the caller PC.
// Unfortunately, there is no way to decide between GCC and LLVM frame
// layouts. Assume GCC.
return bp_prev - 1;
// layouts. Assume LLVM.
return bp_prev;
#else
return (uhwptr*)bp;
#endif
@@ -110,21 +110,14 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
#ifdef __powerpc__
// PowerPC ABIs specify that the return address is saved on the
// *caller's* stack frame. Thus we must dereference the back chain
// to find the caller frame before extracting it.
// PowerPC ABIs specify that the return address is saved at offset
// 16 of the *caller's* stack frame. Thus we must dereference the
// back chain to find the caller frame before extracting it.
uhwptr *caller_frame = (uhwptr*)frame[0];
if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
!IsAligned((uptr)caller_frame, sizeof(uhwptr)))
break;
// For most ABIs the offset where the return address is saved is two
// register sizes. The exception is the SVR4 ABI, which uses an
// offset of only one register size.
#ifdef _CALL_SYSV
uhwptr pc1 = caller_frame[1];
#else
uhwptr pc1 = caller_frame[2];
#endif
#elif defined(__s390__)
uhwptr pc1 = frame[14];
#elif defined(__riscv)
@@ -32,16 +32,14 @@ LibIgnore *libignore();
#if !SANITIZER_GO
inline bool in_symbolizer() {
cur_thread_init();
return UNLIKELY(cur_thread()->in_symbolizer);
return UNLIKELY(cur_thread_init()->in_symbolizer);
}
#endif
} // namespace __tsan
#define SCOPED_INTERCEPTOR_RAW(func, ...) \
cur_thread_init(); \
ThreadState *thr = cur_thread(); \
ThreadState *thr = cur_thread_init(); \
const uptr caller_pc = GET_CALLER_PC(); \
ScopedInterceptor si(thr, #func, caller_pc); \
const uptr pc = GET_CURRENT_PC(); \
@@ -153,7 +153,7 @@ const int SIG_SETMASK = 2;
#endif
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
(cur_thread_init(), !cur_thread()->is_inited)
(!cur_thread_init()->is_inited)
namespace __tsan {
struct SignalDesc {
@@ -531,10 +531,7 @@ static void LongJmp(ThreadState *thr, uptr *env) {
}
// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp) {
cur_thread_init();
SetJmp(cur_thread(), sp);
}
extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
#if SANITIZER_MAC
TSAN_INTERCEPTOR(int, setjmp, void *env);
@@ -973,8 +970,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
void* (*callback)(void *arg) = p->callback;
void *param = p->param;
{
cur_thread_init();
ThreadState *thr = cur_thread();
ThreadState *thr = cur_thread_init();
// Thread-local state is not initialized yet.
ScopedIgnoreInterceptors ignore;
#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
@@ -2061,8 +2057,7 @@ static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
}
void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
cur_thread_init();
ThreadState *thr = cur_thread();
ThreadState *thr = cur_thread_init();
ThreadSignalContext *sctx = SigCtx(thr);
if (sig < 0 || sig >= kSigCount) {
VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
@@ -20,10 +20,7 @@
using namespace __tsan;
void __tsan_init() {
cur_thread_init();
Initialize(cur_thread());
}
void __tsan_init() { Initialize(cur_thread_init()); }
void __tsan_flush_memory() {
FlushShadowMemory();
@@ -124,13 +124,13 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
internal_memset(mem, 0, sizeof(mem));
GetMemoryProfile(FillProfileCallback, mem, MemCount);
auto meta = ctx->metamap.GetMemoryStats();
StackDepotStats *stacks = StackDepotGetStats();
StackDepotStats stacks = StackDepotGetStats();
uptr nthread, nlive;
ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
uptr internal_stats[AllocatorStatCount];
internal_allocator()->GetStats(internal_stats);
// All these are allocated from the common mmap region.
mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks->allocated +
mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks.allocated +
internal_stats[AllocatorStatMapped];
if (s64(mem[MemMmap]) < 0)
mem[MemMmap] = 0;
@@ -143,8 +143,8 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20,
mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20,
mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
meta.mem_block >> 20, meta.sync_obj >> 20, stacks->allocated >> 20,
stacks->n_uniq_ids, nlive, nthread);
meta.mem_block >> 20, meta.sync_obj >> 20, stacks.allocated >> 20,
stacks.n_uniq_ids, nlive, nthread);
}
# if SANITIZER_LINUX
@@ -456,12 +456,14 @@ static void InitializeLongjmpXorKey() {
extern "C" void __tsan_tls_initialization() {}
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
// Check that the thr object is in tls;
const uptr thr_beg = (uptr)thr;
const uptr thr_end = (uptr)thr + sizeof(*thr);
// ThreadState is normally allocated in TLS and is large,
// so we skip it. But unit tests allocate ThreadState outside of TLS.
if (thr_beg < tls_addr || thr_end >= tls_addr + tls_size)
return;
CHECK_GE(thr_beg, tls_addr);
CHECK_LE(thr_beg, tls_addr + tls_size);
CHECK_GE(thr_end, tls_addr);
CHECK_LE(thr_end, tls_addr + tls_size);
// Since the thr object is huge, skip it.
const uptr pc = StackTrace::GetNextInstructionPc(
reinterpret_cast<uptr>(__tsan_tls_initialization));
MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
@@ -159,35 +159,35 @@ void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty);
#endif
StackDepotStats *stacks = StackDepotGetStats();
StackDepotStats stacks = StackDepotGetStats();
uptr nthread, nlive;
ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
internal_snprintf(buf, buf_size,
"shadow   (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"meta     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"traces   (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#if !SANITIZER_GO
"low app  (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"heap     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#else  // !SANITIZER_GO
internal_snprintf(
buf, buf_size,
"shadow   (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"meta     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"traces   (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
# if !SANITIZER_GO
"low app  (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
"heap     (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
# else  // !SANITIZER_GO
"app      (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#endif
"stacks: %zd unique IDs, %zd kB allocated\n"
"threads: %zd total, %zd live\n"
"------------------------------\n",
ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
#if !SANITIZER_GO
LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
#else  // !SANITIZER_GO
# endif
"stacks: %zd unique IDs, %zd kB allocated\n"
"threads: %zd total, %zd live\n"
"------------------------------\n",
ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
# if !SANITIZER_GO
LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
# else  // !SANITIZER_GO
LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024,
#endif
stacks->n_uniq_ids, stacks->allocated / 1024,
nthread, nlive);
# endif
stacks.n_uniq_ids, stacks.allocated / 1024, nthread, nlive);
}
# if !SANITIZER_GO
@@ -283,13 +283,17 @@ uptr ExtractLongJmpSp(uptr *env) {
}
#if !SANITIZER_GO
extern "C" void __tsan_tls_initialization() {}
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
// The pointer to the ThreadState object is stored in the shadow memory
// of the tls.
uptr tls_end = tls_addr + tls_size;
uptr thread_identity = (uptr)pthread_self();
const uptr pc = StackTrace::GetNextInstructionPc(
reinterpret_cast<uptr>(__tsan_tls_initialization));
if (thread_identity == main_thread_identity) {
MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size);
MemoryRangeImitateWrite(thr, pc, tls_addr, tls_size);
} else {
uptr thr_state_start = thread_identity;
uptr thr_state_end = thr_state_start + sizeof(uptr);
@@ -297,10 +301,8 @@ void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
CHECK_LE(thr_state_start, tls_addr + tls_size);
CHECK_GE(thr_state_end, tls_addr);
CHECK_LE(thr_state_end, tls_addr + tls_size);
MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr,
thr_state_start - tls_addr);
MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
tls_end - thr_state_end);
MemoryRangeImitateWrite(thr, pc, tls_addr, thr_state_start - tls_addr);
MemoryRangeImitateWrite(thr, pc, thr_state_end, tls_end - thr_state_end);
}
}
#endif
@@ -43,9 +43,10 @@ int (*on_finalize)(int);
#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
SANITIZER_CACHE_LINE_SIZE);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;
// Can be overriden by a front-end.
@@ -195,8 +196,7 @@ static void *BackgroundThread(void *arg) {
// We don't use ScopedIgnoreInterceptors, because we want ignores to be
// enabled even when the thread function exits (e.g. during pthread thread
// shutdown code).
cur_thread_init();
cur_thread()->ignore_interceptors++;
cur_thread_init()->ignore_interceptors++;
const u64 kMs2Ns = 1000 * 1000;
const u64 start = NanoTime();
@@ -230,23 +230,24 @@ struct ThreadState {
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
inline void cur_thread_init() { }
#else
inline ThreadState *cur_thread_init() { return cur_thread(); }
# else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
inline void cur_thread_init() {
inline ThreadState *cur_thread_init() {
ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
if (UNLIKELY(!thr->current))
thr->current = thr;
return thr->current;
}
inline void set_cur_thread(ThreadState *thr) {
reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
#endif  // SANITIZER_MAC || SANITIZER_ANDROID
# endif  // SANITIZER_MAC || SANITIZER_ANDROID
#endif  // SANITIZER_GO
class ThreadContext final : public ThreadContextBase {
@@ -13,6 +13,7 @@ ASM_HIDDEN(__tsan_trace_switch)
.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
ASM_SYMBOL(__tsan_trace_switch_thunk):
CFI_STARTPROC
_CET_ENDBR
# Save scratch registers.
push %rax
CFI_ADJUST_CFA_OFFSET(8)
@@ -93,6 +94,7 @@ ASM_HIDDEN(__tsan_report_race)
.globl ASM_SYMBOL(__tsan_report_race_thunk)
ASM_SYMBOL(__tsan_report_race_thunk):
CFI_STARTPROC
_CET_ENDBR
# Save scratch registers.
push %rax
CFI_ADJUST_CFA_OFFSET(8)
@@ -185,6 +187,7 @@ ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
ASM_SYMBOL_INTERCEPTOR(setjmp):
#endif
CFI_STARTPROC
_CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -226,6 +229,7 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
ASM_SYMBOL_INTERCEPTOR(_setjmp):
CFI_STARTPROC
_CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -267,6 +271,7 @@ ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
#endif
CFI_STARTPROC
_CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -323,6 +328,7 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
CFI_STARTPROC
_CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -1,6 +1,5 @@
#include "tsan_ppc_regs.h"
.machine altivec
.section .text
.hidden __tsan_setjmp
.globl _setjmp
@@ -50,7 +50,6 @@ void InitializeFlags() {
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.print_summary = false;
cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
OverrideCommonFlags(cf);
}
@@ -894,21 +894,6 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
} // namespace __ubsan
void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
ValueHandle Function) {
GET_REPORT_OPTIONS(false);
CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
handleCFIBadIcall(&Data, Function, Opts);
}
void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
ValueHandle Function) {
GET_REPORT_OPTIONS(true);
CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
handleCFIBadIcall(&Data, Function, Opts);
Die();
}
void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
ValueHandle Value,
uptr ValidVtable) {
@@ -215,20 +215,12 @@ enum CFITypeCheckKind : unsigned char {
CFITCK_VMFCall,
};
struct CFIBadIcallData {
SourceLocation Loc;
const TypeDescriptor &Type;
};
struct CFICheckFailData {
CFITypeCheckKind CheckKind;
SourceLocation Loc;
const TypeDescriptor &Type;
};
/// \brief Handle control flow integrity failure for indirect function calls.
RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
/// \brief Handle control flow integrity failures.
RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
uptr VtableIsValid)
@@ -12,7 +12,6 @@
#ifndef UBSAN_PLATFORM_H
#define UBSAN_PLATFORM_H
#ifndef CAN_SANITIZE_UB
// Other platforms should be easy to add, and probably work as-is.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
defined(__NetBSD__) || defined(__DragonFly__) || \
@@ -22,6 +21,5 @@
#else
# define CAN_SANITIZE_UB 0
#endif
#endif //CAN_SANITIZE_UB
#endif