libsanitizer merge from upstream r250806.

libsanitizer/

2015-10-20  Maxim Ostapenko  <m.ostapenko@partner.samsung.com>

	* All source files: Merge from upstream r250806.
	* configure.ac (link_sanitizer_common): Add -lrt flag.
	* configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
	Set USE_CXX_ABI_FLAG=true for darwin.
	* asan/Makefile.am (asan_files): Add new files.
	(DEFS): Add DCAN_SANITIZE_UB=0 and remove unused and legacy
	DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
	* asan/Makefile.in: Regenerate.
	* ubsan/Makefile.am (ubsan_files): Add new files.
	(DEFS): Add DCAN_SANITIZE_UB=1.
	(libubsan_la_LIBADD): Add -lc++abi if USE_CXX_ABI_FLAG is true.
	* ubsan/Makefile.in: Regenerate.
	* tsan/Makefile.am (tsan_files): Add new files.
	(DEFS): Add DCAN_SANITIZE_UB=0.
	* tsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
	* sanitizer_common/Makefile.in: Regenerate.
	* asan/libtool-version: Bump the libasan SONAME.

From-SVN: r229111
This commit is contained in:
Max Ostapenko 2015-10-21 10:32:45 +03:00 committed by Maxim Ostapenko
parent 013a8899f5
commit 696d846a56
218 changed files with 12360 additions and 6496 deletions

View File

@ -1,3 +1,24 @@
2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
* All source files: Merge from upstream r250806.
* configure.ac (link_sanitizer_common): Add -lrt flag.
* configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
Set USE_CXX_ABI_FLAG=true for darwin.
* asan/Makefile.am (asan_files): Add new files.
(DEFS): Add DCAN_SANITIZE_UB=0 and remove unused and legacy
DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
* asan/Makefile.in: Regenerate.
* ubsan/Makefile.am (ubsan_files): Add new files.
(DEFS): Add DCAN_SANITIZE_UB=1.
(libubsan_la_LIBADD): Add -lc++abi if USE_CXX_ABI_FLAG is true.
* ubsan/Makefile.in: Regenerate.
* tsan/Makefile.am (tsan_files): Add new files.
(DEFS): Add DCAN_SANITIZE_UB=0.
* tsan/Makefile.in: Regenerate.
* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
* sanitizer_common/Makefile.in: Regenerate.
* asan/libtool-version: Bump the libasan SONAME.
2015-09-09 Markus Trippelsdorf <markus@trippelsdorf.de> 2015-09-09 Markus Trippelsdorf <markus@trippelsdorf.de>
PR sanitizer/67258 PR sanitizer/67258

View File

@ -1,4 +1,4 @@
221802 250806
The first line of this file holds the svn revision number of the The first line of this file holds the svn revision number of the
last merge done from the master library sources. last merge done from the master library sources.

View File

@ -3,7 +3,7 @@ AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir)
# May be used by toolexeclibdir. # May be used by toolexeclibdir.
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER)
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 -DASAN_NEEDS_SEGV=1 DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 -DCAN_SANITIZE_UB=0
if USING_MAC_INTERPOSE if USING_MAC_INTERPOSE
DEFS += -DMAC_INTERPOSE_FUNCTIONS -DMISSING_BLOCKS_SUPPORT DEFS += -DMAC_INTERPOSE_FUNCTIONS -DMISSING_BLOCKS_SUPPORT
endif endif
@ -17,9 +17,10 @@ nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \ asan_files = \
asan_activation.cc \ asan_activation.cc \
asan_allocator2.cc \ asan_allocator.cc \
asan_debugging.cc \ asan_debugging.cc \
asan_fake_stack.cc \ asan_fake_stack.cc \
asan_flags.cc \
asan_globals.cc \ asan_globals.cc \
asan_interceptors.cc \ asan_interceptors.cc \
asan_linux.cc \ asan_linux.cc \
@ -34,6 +35,7 @@ asan_files = \
asan_rtl.cc \ asan_rtl.cc \
asan_stack.cc \ asan_stack.cc \
asan_stats.cc \ asan_stats.cc \
asan_suppressions.cc \
asan_thread.cc \ asan_thread.cc \
asan_win.cc \ asan_win.cc \
asan_win_dll_thunk.cc \ asan_win_dll_thunk.cc \

View File

@ -111,14 +111,14 @@ libasan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \ $(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \ $(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
$(am__append_3) $(am__DEPENDENCIES_1) $(am__append_3) $(am__DEPENDENCIES_1)
am__objects_1 = asan_activation.lo asan_allocator2.lo \ am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \
asan_debugging.lo asan_fake_stack.lo asan_globals.lo \ asan_fake_stack.lo asan_flags.lo asan_globals.lo \
asan_interceptors.lo asan_linux.lo asan_mac.lo \ asan_interceptors.lo asan_linux.lo asan_mac.lo \
asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \ asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
asan_new_delete.lo asan_poisoning.lo asan_posix.lo \ asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \ asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
asan_thread.lo asan_win.lo asan_win_dll_thunk.lo \ asan_suppressions.lo asan_thread.lo asan_win.lo \
asan_win_dynamic_runtime_thunk.lo asan_win_dll_thunk.lo asan_win_dynamic_runtime_thunk.lo
am_libasan_la_OBJECTS = $(am__objects_1) am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS) libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@ -172,8 +172,8 @@ CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@ CYGPATH_W = @CYGPATH_W@
DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS \ DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS \
-D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS \ -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS \
-DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 \ -DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 \
-DASAN_NEEDS_SEGV=1 $(am__append_1) -DCAN_SANITIZE_UB=0 $(am__append_1)
DEPDIR = @DEPDIR@ DEPDIR = @DEPDIR@
DSYMUTIL = @DSYMUTIL@ DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@ DUMPBIN = @DUMPBIN@
@ -306,9 +306,10 @@ toolexeclib_LTLIBRARIES = libasan.la
nodist_toolexeclib_HEADERS = libasan_preinit.o nodist_toolexeclib_HEADERS = libasan_preinit.o
asan_files = \ asan_files = \
asan_activation.cc \ asan_activation.cc \
asan_allocator2.cc \ asan_allocator.cc \
asan_debugging.cc \ asan_debugging.cc \
asan_fake_stack.cc \ asan_fake_stack.cc \
asan_flags.cc \
asan_globals.cc \ asan_globals.cc \
asan_interceptors.cc \ asan_interceptors.cc \
asan_linux.cc \ asan_linux.cc \
@ -323,6 +324,7 @@ asan_files = \
asan_rtl.cc \ asan_rtl.cc \
asan_stack.cc \ asan_stack.cc \
asan_stats.cc \ asan_stats.cc \
asan_suppressions.cc \
asan_thread.cc \ asan_thread.cc \
asan_win.cc \ asan_win.cc \
asan_win_dll_thunk.cc \ asan_win_dll_thunk.cc \
@ -450,9 +452,10 @@ distclean-compile:
-rm -f *.tab.c -rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_linux.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_linux.Plo@am__quote@
@ -467,6 +470,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_rtl.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_rtl.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dll_thunk.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dll_thunk.Plo@am__quote@

View File

@ -14,32 +14,106 @@
#include "asan_allocator.h" #include "asan_allocator.h"
#include "asan_flags.h" #include "asan_flags.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
namespace __asan { namespace __asan {
static struct AsanDeactivatedFlags { static struct AsanDeactivatedFlags {
int quarantine_size; AllocatorOptions allocator_options;
int max_redzone;
int malloc_context_size; int malloc_context_size;
bool poison_heap; bool poison_heap;
bool coverage;
const char *coverage_dir;
void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) {
#define ASAN_ACTIVATION_FLAG(Type, Name) \
RegisterFlag(parser, #Name, "", &f->Name);
#define COMMON_ACTIVATION_FLAG(Type, Name) \
RegisterFlag(parser, #Name, "", &cf->Name);
#include "asan_activation_flags.inc"
#undef ASAN_ACTIVATION_FLAG
#undef COMMON_ACTIVATION_FLAG
RegisterIncludeFlags(parser, cf);
}
void OverrideFromActivationFlags() {
Flags f;
CommonFlags cf;
FlagParser parser;
RegisterActivationFlags(&parser, &f, &cf);
// Copy the current activation flags.
allocator_options.CopyTo(&f, &cf);
cf.malloc_context_size = malloc_context_size;
f.poison_heap = poison_heap;
cf.coverage = coverage;
cf.coverage_dir = coverage_dir;
cf.verbosity = Verbosity();
cf.help = false; // this is activation-specific help
// Check if activation flags need to be overriden.
if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
parser.ParseString(env);
}
// Override from getprop asan.options.
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
parser.ParseString(buf);
SetVerbosity(cf.verbosity);
if (Verbosity()) ReportUnrecognizedFlags();
if (cf.help) parser.PrintFlagDescriptions();
allocator_options.SetFrom(&f, &cf);
malloc_context_size = cf.malloc_context_size;
poison_heap = f.poison_heap;
coverage = cf.coverage;
coverage_dir = cf.coverage_dir;
}
void Print() {
Report(
"quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
"allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
allocator_options.quarantine_size_mb, allocator_options.max_redzone,
poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
allocator_options.may_return_null, coverage, coverage_dir);
}
} asan_deactivated_flags; } asan_deactivated_flags;
static bool asan_is_deactivated; static bool asan_is_deactivated;
void AsanStartDeactivated() { void AsanDeactivate() {
CHECK(!asan_is_deactivated);
VReport(1, "Deactivating ASan\n"); VReport(1, "Deactivating ASan\n");
// Save flag values.
asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
asan_deactivated_flags.max_redzone = flags()->max_redzone;
asan_deactivated_flags.poison_heap = flags()->poison_heap;
asan_deactivated_flags.malloc_context_size =
common_flags()->malloc_context_size;
flags()->quarantine_size = 0; // Stash runtime state.
flags()->max_redzone = 16; GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
flags()->poison_heap = false; asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
common_flags()->malloc_context_size = 0; asan_deactivated_flags.poison_heap = CanPoisonMemory();
asan_deactivated_flags.coverage = common_flags()->coverage;
asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir;
// Deactivate the runtime.
SetCanPoisonMemory(false);
SetMallocContextSize(1);
ReInitializeCoverage(false, nullptr);
AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
disabled.quarantine_size_mb = 0;
disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
disabled.max_redzone = 16;
disabled.alloc_dealloc_mismatch = false;
disabled.may_return_null = true;
ReInitializeAllocator(disabled);
asan_is_deactivated = true; asan_is_deactivated = true;
} }
@ -48,25 +122,21 @@ void AsanActivate() {
if (!asan_is_deactivated) return; if (!asan_is_deactivated) return;
VReport(1, "Activating ASan\n"); VReport(1, "Activating ASan\n");
// Restore flag values. UpdateProcessName();
// FIXME: this is not atomic, and there may be other threads alive.
flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
flags()->max_redzone = asan_deactivated_flags.max_redzone;
flags()->poison_heap = asan_deactivated_flags.poison_heap;
common_flags()->malloc_context_size =
asan_deactivated_flags.malloc_context_size;
ParseExtraActivationFlags(); asan_deactivated_flags.OverrideFromActivationFlags();
ReInitializeAllocator(); SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
ReInitializeCoverage(asan_deactivated_flags.coverage,
asan_deactivated_flags.coverage_dir);
ReInitializeAllocator(asan_deactivated_flags.allocator_options);
asan_is_deactivated = false; asan_is_deactivated = false;
VReport( if (Verbosity()) {
1, Report("Activated with flags:\n");
"quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size " asan_deactivated_flags.Print();
"%d\n", }
flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
common_flags()->malloc_context_size);
} }
} // namespace __asan } // namespace __asan

View File

@ -14,7 +14,7 @@
#define ASAN_ACTIVATION_H #define ASAN_ACTIVATION_H
namespace __asan { namespace __asan {
void AsanStartDeactivated(); void AsanDeactivate();
void AsanActivate(); void AsanActivate();
} // namespace __asan } // namespace __asan

View File

@ -0,0 +1,33 @@
//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A subset of ASan (and common) runtime flags supported at activation time.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_ACTIVATION_FLAG
# error "Define ASAN_ACTIVATION_FLAG prior to including this file!"
#endif
#ifndef COMMON_ACTIVATION_FLAG
# error "Define COMMON_ACTIVATION_FLAG prior to including this file!"
#endif
// ASAN_ACTIVATION_FLAG(Type, Name)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
ASAN_ACTIVATION_FLAG(int, redzone)
ASAN_ACTIVATION_FLAG(int, max_redzone)
ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
ASAN_ACTIVATION_FLAG(bool, poison_heap)
COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null)
COMMON_ACTIVATION_FLAG(int, malloc_context_size)
COMMON_ACTIVATION_FLAG(bool, coverage)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)

View File

@ -0,0 +1,906 @@
//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"
namespace __asan {
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
// Decode a 3-bit redzone size class (rz_log) back into its byte size.
// Valid classes are 0..7, mapping to 16, 32, ... 2048 bytes.
static u32 RZLog2Size(u32 rz_log) {
  CHECK_LT(rz_log, 8);
  return 1U << (rz_log + 4);  // Same as 16 << rz_log.
}
// Encode a redzone byte size (a power of two in [16, 2048]) into the 3-bit
// rz_log representation stored in ChunkHeader. Inverse of RZLog2Size.
static u32 RZSize2Log(u32 rz_size) {
  CHECK_GE(rz_size, 16);
  CHECK_LE(rz_size, 2048);
  CHECK(IsPowerOfTwo(rz_size));
  u32 res = Log2(rz_size) - 4;
  CHECK_EQ(rz_size, RZLog2Size(res));  // Round-trip sanity check.
  return res;
}
static AsanAllocator &get_allocator();
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
//   L -- left redzone words (0 or more bytes)
//   H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
//   U -- user memory.
//   R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ----------------------
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;

// Per-chunk metadata kept in the left redzone immediately before user memory.
struct ChunkHeader {
  // 1-st 8 bytes.
  u32 chunk_state : 8;  // Must be first. One of the CHUNK_* states; mutated
                        // atomically (see Allocate/AtomicallySetQuarantineFlag).
  u32 alloc_tid : 24;   // Allocating thread id; must fit in 24 bits
                        // (checked with CHECK_EQ in Allocate).
  u32 free_tid : 24;    // Freeing thread id; kInvalidTid while allocated.
  u32 from_memalign : 1;  // User pointer was aligned past the left redzone.
  u32 alloc_type : 2;     // malloc/new/new[] kind, for mismatch reports.
  u32 rz_log : 3;         // Left redzone size, encoded via RZSize2Log.
  u32 lsan_tag : 2;       // LeakSanitizer tag (see CAN_SANITIZE_LEAKS block).
  // 2-nd 8 bytes
  // This field is used for small sizes. For large sizes it is equal to
  // SizeClassMap::kMaxSize and the actual size is stored in the
  // SecondaryAllocator's metadata.
  u32 user_requested_size;
  u32 alloc_context_id;  // StackDepot id of the allocation stack trace.
};
struct ChunkBase : ChunkHeader {
  // Header2, intersects with user memory.
  u32 free_context_id;  // StackDepot id of the deallocation stack trace.
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
// The chunk layout (user_beg - kChunkHeaderSize arithmetic) relies on these
// exact sizes.
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
  CHUNK_AVAILABLE  = 0,  // 0 is the default value even if we didn't set it.
  CHUNK_ALLOCATED  = 2,
  CHUNK_QUARANTINE = 3
};
// Full chunk view: the header plus address-arithmetic helpers.
struct AsanChunk: ChunkBase {
  // First byte of user memory (immediately after the header).
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  // Size requested by the user. For large (secondary) allocations the real
  // size lives in the allocator metadata, not in the header field.
  uptr UsedSize(bool locked_version = false) {
    if (user_requested_size != SizeClassMap::kMaxSize)
      return user_requested_size;
    return *reinterpret_cast<uptr *>(
        get_allocator().GetMetaData(AllocBeg(locked_version)));
  }
  // Start of the underlying allocator block containing this chunk.
  void *AllocBeg(bool locked_version = false) {
    if (from_memalign) {
      // The user pointer was aligned past the redzone, so the block start
      // must be recovered from the allocator itself.
      if (locked_version)
        return get_allocator().GetBlockBeginFastLocked(
            reinterpret_cast<void *>(this));
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    }
    return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
  }
  // True iff addr lies within the user region of this chunk.
  bool AddrIsInside(uptr addr, bool locked_version = false) {
    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
  }
};
// Callbacks the quarantine uses to recycle expired chunks and to manage its
// own bookkeeping memory through the underlying allocator.
struct QuarantineCallback {
  explicit QuarantineCallback(AllocatorCache *cache)
      : cache_(cache) {
  }

  // Called when a quarantined chunk is evicted: repoison it as redzone,
  // clear the magic backlink, and return the block to the allocator.
  void Recycle(AsanChunk *m) {
    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
    CHECK_NE(m->alloc_tid, kInvalidTid);
    CHECK_NE(m->free_tid, kInvalidTid);
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
    void *p = reinterpret_cast<void *>(m->AllocBeg());
    if (p != m) {
      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetAsanChunk lookup.
      alloc_magic[0] = 0;
      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
    }

    // Statistics.
    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->UsedSize();

    get_allocator().Deallocate(cache_, p);
  }

  // Allocate/Deallocate internal quarantine storage.
  void *Allocate(uptr size) {
    return get_allocator().Allocate(cache_, size, 1, false);
  }

  void Deallocate(void *p) {
    get_allocator().Deallocate(cache_, p);
  }

  AllocatorCache *cache_;
};

typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
// Invoked by the underlying allocator when it maps new memory: pre-poison it
// as heap redzone and account the mapping in the thread stats.
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
  PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
// Invoked by the underlying allocator before unmapping memory: clear the
// shadow and let the OS reclaim it, then account the unmapping.
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  PoisonShadow(p, size, 0);
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededASanShadowMemory(p, size);
  // Statistics.
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
// Per-thread allocator cache, stored inside the thread's malloc storage.
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

// Per-thread quarantine cache, stored as a raw byte buffer in malloc storage.
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
  CHECK(ms);
  CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
  return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
// Populate allocator options from the ASan (f) and common (cf) runtime flags.
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
  quarantine_size_mb = f->quarantine_size_mb;
  min_redzone = f->redzone;
  max_redzone = f->max_redzone;
  may_return_null = cf->allocator_may_return_null;
  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
}

// Inverse of SetFrom: write these options back into the flag structures
// (used when snapshotting state for activation/deactivation).
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
  f->quarantine_size_mb = quarantine_size_mb;
  f->redzone = min_redzone;
  f->max_redzone = max_redzone;
  cf->allocator_may_return_null = may_return_null;
  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
}
// Central ASan allocator state (the struct continues below with its methods).
struct Allocator {
  // Hard cap on a single allocation: 3 GB on 32-bit, 1 TB on 64-bit.
  static const uptr kMaxAllowedMallocSize =
      FIRST_32_SECOND_64(3UL << 30, 1UL << 40);
  static const uptr kMaxThreadLocalQuarantine =
      FIRST_32_SECOND_64(1 << 18, 1 << 20);

  AsanAllocator allocator;
  AsanQuarantine quarantine;
  StaticSpinMutex fallback_mutex;
  // Caches used when the current thread has no thread-local malloc storage;
  // accesses are serialized with fallback_mutex.
  AllocatorCache fallback_allocator_cache;
  QuarantineCache fallback_quarantine_cache;

  // ------------------- Options --------------------------
  // Stored atomically so they can be updated (e.g. on activation) while
  // other threads allocate.
  atomic_uint16_t min_redzone;
  atomic_uint16_t max_redzone;
  atomic_uint8_t alloc_dealloc_mismatch;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized)
      : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}
  // Validate redzone options; dies via CHECK on invalid values.
  void CheckOptions(const AllocatorOptions &options) const {
    CHECK_GE(options.min_redzone, 16);
    CHECK_GE(options.max_redzone, options.min_redzone);
    CHECK_LE(options.max_redzone, 2048);
    CHECK(IsPowerOfTwo(options.min_redzone));
    CHECK(IsPowerOfTwo(options.max_redzone));
  }
  // Option application shared by Initialize and ReInitialize: sizes the
  // quarantine and publishes the redzone/mismatch settings atomically.
  void SharedInitCode(const AllocatorOptions &options) {
    CheckOptions(options);
    quarantine.Init((uptr)options.quarantine_size_mb << 20,
                    kMaxThreadLocalQuarantine);
    atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
                 memory_order_release);
    atomic_store(&min_redzone, options.min_redzone, memory_order_release);
    atomic_store(&max_redzone, options.max_redzone, memory_order_release);
  }
  // One-time initialization of the underlying allocator plus shared options.
  void Initialize(const AllocatorOptions &options) {
    allocator.Init(options.may_return_null);
    SharedInitCode(options);
  }

  // Re-apply options on an already-initialized allocator (activation path).
  void ReInitialize(const AllocatorOptions &options) {
    allocator.SetMayReturnNull(options.may_return_null);
    SharedInitCode(options);
  }

  // Read back the currently effective options (inverse of SharedInitCode).
  void GetOptions(AllocatorOptions *options) const {
    options->quarantine_size_mb = quarantine.GetSize() >> 20;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = allocator.MayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
  }
  // -------------------- Helper methods. -------------------------
  // Choose a redzone size class for a request: larger allocations get larger
  // redzones, then the result is clamped to [min_redzone, max_redzone].
  uptr ComputeRZLog(uptr user_requested_size) {
    u32 rz_log =
      user_requested_size <= 64        - 16   ? 0 :
      user_requested_size <= 128       - 32   ? 1 :
      user_requested_size <= 512       - 64   ? 2 :
      user_requested_size <= 4096      - 128  ? 3 :
      user_requested_size <= (1 << 14) - 256  ? 4 :
      user_requested_size <= (1 << 15) - 512  ? 5 :
      user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
    u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
    u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
    return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
  }
  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
                         AsanChunk *right_chunk) {
    // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
    if (left_chunk->chunk_state != right_chunk->chunk_state) {
      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
        return right_chunk;
      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
        return left_chunk;
      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
        return right_chunk;
    }
    // Same chunk_state: choose based on offset.
    sptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }
  // -------------------- Allocation/Deallocation routines ---------------
  // Allocate `size` bytes aligned to `alignment`, surrounded by redzones.
  // `stack` is recorded as the allocation stack trace, `alloc_type` records
  // malloc/new/new[] for mismatch detection, and `can_fill` enables filling
  // the user region with malloc_fill_byte. Returns the user pointer, or
  // null (or dies, per allocator policy) on failure.
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type, bool can_fill) {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rz_log = ComputeRZLog(size);
    uptr rz_size = RZLog2Size(rz_log);
    uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
    uptr needed_size = rounded_size + rz_size;
    if (alignment > min_alignment)
      needed_size += alignment;  // Slack to realign the user pointer later.
    bool using_primary_allocator = true;
    // If we are allocating from the secondary allocator, there will be no
    // automatic right redzone, so add the right redzone manually.
    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
      needed_size += rz_size;
      using_primary_allocator = false;
    }
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
             (void*)size);
      return allocator.ReturnNullOrDie();
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    bool check_rss_limit = true;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    } else {
      // No thread-local state: use the global fallback cache under a lock.
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated =
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    }

    if (!allocated)
      return allocator.ReturnNullOrDie();

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
      // chunk. This is possible if CanPoisonMemory() was false for some
      // time, for example, due to flags()->start_disabled.
      // Anyway, poison the block before using it for anything else.
      uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
      PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
    }

    // Carve the user region out of the raw block (see the layout diagram at
    // the top of this file).
    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_redzone = alloc_beg + rz_size;
    uptr user_beg = beg_plus_redzone;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
    m->alloc_type = alloc_type;
    m->rz_log = rz_log;
    u32 alloc_tid = t ? t->tid() : 0;
    m->alloc_tid = alloc_tid;
    CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
    m->free_tid = kInvalidTid;
    m->from_memalign = user_beg != beg_plus_redzone;
    if (alloc_beg != chunk_beg) {
      // Left redzone is bigger than the header: store the magic + backlink
      // at the block start so the header can be found from alloc_beg.
      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
    }
    if (using_primary_allocator) {
      CHECK(size);
      m->user_requested_size = size;
      CHECK(allocator.FromPrimary(allocated));
    } else {
      // Secondary allocation: real size goes into the allocator metadata.
      CHECK(!allocator.FromPrimary(allocated));
      m->user_requested_size = SizeClassMap::kMaxSize;
      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
      meta[0] = size;
      meta[1] = chunk_beg;
    }

    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    // Unpoison the bulk of the memory region.
    if (size_rounded_down_to_granularity)
      PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
    // Deal with the end of the region if size is not aligned to granularity.
    if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
      u8 *shadow =
          (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
      *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
    }

    AsanStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_redzones += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    if (can_fill && fl.max_malloc_fill_size) {
      uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
      REAL(memset)(res, fl.malloc_fill_byte, fill_size);
    }
#if CAN_SANITIZE_LEAKS
    m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                 : __lsan::kDirectlyLeaked;
#endif
    // Must be the last mutation of metadata in this function.
    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
    ASAN_MALLOC_HOOK(res, size);
    return res;
  }
  // Atomically flip an ALLOCATED chunk to QUARANTINE; if the chunk was in
  // any other state, report it as an invalid (e.g. double) free.
  void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
                                   BufferedStackTrace *stack) {
    u8 old_chunk_state = CHUNK_ALLOCATED;
    // Flip the chunk_state atomically to avoid race on double-free.
    if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
                                        CHUNK_QUARANTINE, memory_order_acquire))
      ReportInvalidFree(ptr, old_chunk_state, stack);
    CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
  }
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
// Records who freed the chunk and from where, poisons the user region,
// and hands the chunk to the quarantine (which recycles it later). Must
// run after the CHUNK_QUARANTINE transition so no other thread races us.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
                     AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
// malloc/free vs. new/delete (or new[]/delete[]) mismatch is reported
// only when the runtime flag asks for it.
if (m->alloc_type != alloc_type) {
  if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
    ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
                            (AllocType)alloc_type);
  }
}
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64)  // On 32-bits this resides in user area.
  CHECK_EQ(m->free_tid, kInvalidTid);
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
m->free_context_id = StackDepotPut(*stack);
// Poison the region.
PoisonShadow(m->Beg(),
             RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
             kAsanHeapFreeMagic);
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.frees++;
thread_stats.freed += m->UsedSize();
// Push into quarantine.
if (t) {
  AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
  AllocatorCache *ac = GetAllocatorCache(ms);
  quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
                 m->UsedSize());
} else {
  // No current thread (e.g. during thread teardown): use the global
  // fallback cache guarded by fallback_mutex.
  SpinMutexLock l(&fallback_mutex);
  AllocatorCache *ac = &fallback_allocator_cache;
  quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
                 m->UsedSize());
}
}
// Frees ptr: optionally validates C++14 sized delete, runs the user free
// hook, then atomically quarantines the chunk. delete_size == 0 skips the
// size check (plain free / non-sized delete).
void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
// free(nullptr) is a no-op.
if (p == 0) return;
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
if (delete_size && flags()->new_delete_type_mismatch &&
    delete_size != m->UsedSize()) {
  ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
AtomicallySetQuarantineFlag(m, ptr, stack);
QuarantineChunk(m, ptr, stack, alloc_type);
}
// Reallocates old_ptr to new_size by allocating a fresh chunk, copying
// min(old size, new size) bytes and freeing the old chunk. Both arguments
// must be nonzero -- the nullptr/0 special cases are handled by
// asan_realloc(). On allocation failure the old chunk is left untouched
// and nullptr is returned.
void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
CHECK(old_ptr && new_size);
uptr p = reinterpret_cast<uptr>(old_ptr);
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.reallocs++;
thread_stats.realloced += new_size;
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
if (new_ptr) {
  // The old chunk must still be live; anything else means realloc() of a
  // freed (or foreign) pointer.
  u8 chunk_state = m->chunk_state;
  if (chunk_state != CHUNK_ALLOCATED)
    ReportInvalidFree(old_ptr, chunk_state, stack);
  CHECK_NE(REAL(memcpy), nullptr);
  uptr memcpy_size = Min(new_size, m->UsedSize());
  // If realloc() races with free(), we may start copying freed memory.
  // However, we will report racy double-free later anyway.
  REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
  Deallocate(old_ptr, 0, stack, FROM_MALLOC);
}
return new_ptr;
}
// Implements calloc(): allocate nmemb * size bytes and return them zeroed.
// On multiplication overflow defer to the allocator's null-or-die policy.
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return allocator.ReturnNullOrDie();
  const uptr total = nmemb * size;  // Safe: overflow was ruled out above.
  void *mem = Allocate(total, 8, stack, FROM_MALLOC, false);
  // Secondary (mmap-backed) allocations come back already zeroed; only
  // primary chunks need an explicit clear.
  if (mem && allocator.FromPrimary(mem))
    REAL(memset)(mem, 0, total);
  return mem;
}
void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
if (chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
else
ReportFreeNotMalloced((uptr)ptr, stack);
}
// Thread-exit hook: flush this thread's quarantine cache into the global
// quarantine, then return its allocator cache to the shared pool.
void CommitBack(AsanThreadLocalMallocStorage *ms) {
  AllocatorCache *cache = GetAllocatorCache(ms);
  quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(cache));
  allocator.SwallowCache(cache);
}
// -------------------------- Chunk lookup ----------------------
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
// Translates the start of an allocator block into its AsanChunk header:
// secondary (mmap) blocks keep the header address in metadata word 1;
// primary blocks either start with the kAllocBegMagic/header-address pair
// (left redzone larger than the header, e.g. after memalign) or with the
// header itself.
AsanChunk *GetAsanChunk(void *alloc_beg) {
if (!alloc_beg) return nullptr;
if (!allocator.FromPrimary(alloc_beg)) {
  uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
  AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
  return m;
}
uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
if (alloc_magic[0] == kAllocBegMagic)
  return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
return reinterpret_cast<AsanChunk *>(alloc_beg);
}
// Maps an arbitrary (possibly interior) pointer to its chunk header.
AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *block = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(block);
}
// Allocator must be locked when this function is called.
// Same as GetAsanChunkByAddr but uses the fast lookup path that is only
// valid while the allocator lock is held (e.g. during leak checking).
AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *block =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(block);
}
// Returns the user-requested size of the live chunk whose user region
// starts exactly at p; 0 for anything else (freed, interior, or foreign
// pointers).
uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m || m->chunk_state != CHUNK_ALLOCATED || m->Beg() != p)
    return 0;
  return m->UsedSize();
}
// Finds the chunk that best explains an access at addr, for error reports.
// If addr falls in a chunk's left redzone it may really be a right
// overflow from the chunk just below in memory, so scan up to one page to
// the left and let ChooseChunk() pick the more plausible candidate.
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
if (!m1) return AsanChunkView(m1);
sptr offset = 0;
if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
  // The address is in the chunk's left redzone, so maybe it is actually
  // a right buffer overflow from the other chunk to the left.
  // Search a bit to the left to see if there is another chunk.
  AsanChunk *m2 = nullptr;
  for (uptr l = 1; l < GetPageSizeCached(); l++) {
    m2 = GetAsanChunkByAddr(addr - l);
    if (m2 == m1) continue;  // Still the same chunk.
    break;
  }
  if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
    m1 = ChooseChunk(addr, m2, m1);
}
return AsanChunkView(m1);
}
// Dumps allocator statistics (debugging aid).
void PrintStats() {
allocator.PrintStats();
}
// Acquires the allocator lock, then the fallback-cache mutex.
// NOTE: ForceUnlock() must release them in the opposite order; keep the
// two functions mirrored to avoid lock-order inversion.
void ForceLock() {
allocator.ForceLock();
fallback_mutex.Lock();
}
// Releases the locks taken by ForceLock(), in reverse order.
void ForceUnlock() {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
};
// The single global allocator instance. LINKER_INITIALIZED avoids running
// a constructor during static initialization; the object is set up later
// via InitializeAllocator().
static Allocator instance(LINKER_INITIALIZED);
// Accessor for the underlying combined allocator (used by the LSan glue).
static AsanAllocator &get_allocator() {
return instance.allocator;
}
// A view is usable when it wraps a chunk that is not on the free list
// (allocated and quarantined chunks still carry meaningful metadata).
bool AsanChunkView::IsValid() {
  if (!chunk_) return false;
  return chunk_->chunk_state != CHUNK_AVAILABLE;
}
// Trivial accessors over the wrapped chunk; callers must check IsValid()
// first (chunk_ may be null).
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
// Fetches a stack trace previously registered with the stack depot; id
// must be a valid (nonzero) depot handle.
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
StackTrace res = StackDepotGet(id);
CHECK(res.trace);
return res;
}
// Stack trace captured when the chunk was allocated.
StackTrace AsanChunkView::GetAllocStack() {
return GetStackTraceFromId(chunk_->alloc_context_id);
}
// Stack trace captured when the chunk was freed (only meaningful for
// quarantined chunks).
StackTrace AsanChunkView::GetFreeStack() {
return GetStackTraceFromId(chunk_->free_context_id);
}
// Public entry points: thin forwarders to the global Allocator instance.
void InitializeAllocator(const AllocatorOptions &options) {
instance.Initialize(options);
}
void ReInitializeAllocator(const AllocatorOptions &options) {
instance.ReInitialize(options);
}
void GetAllocatorOptions(AllocatorOptions *options) {
instance.GetOptions(options);
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}
// Called on thread exit to return per-thread caches to the global state.
void AsanThreadLocalMallocStorage::CommitBack() {
instance.CommitBack(this);
}
void PrintInternalAllocatorStats() {
instance.PrintStats();
}
// C allocation API shims used by the interceptors; each forwards to the
// global instance with the appropriate alignment and allocation type.
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
return instance.Allocate(size, alignment, stack, alloc_type, true);
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
instance.Deallocate(ptr, 0, stack, alloc_type);
}
// Sized variant used by C++14 sized operator delete.
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
instance.Deallocate(ptr, size, stack, alloc_type);
}
// Default 8-byte alignment, matching malloc's minimum guarantee here.
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
}
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
return instance.Calloc(nmemb, size, stack);
}
// Implements realloc()'s edge cases before delegating to Reallocate().
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  // realloc(nullptr, n) behaves like malloc(n).
  if (!p)
    return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
  // realloc(p, 0) behaves like free(p) and yields nullptr.
  if (!size) {
    instance.Deallocate(p, 0, stack, FROM_MALLOC);
    return nullptr;
  }
  return instance.Reallocate(p, size, stack);
}
// valloc() returns page-aligned memory.
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  const uptr page_size = GetPageSizeCached();
  return instance.Allocate(size, page_size, stack, FROM_MALLOC, true);
}
// pvalloc(): like valloc() but rounds the size up to a whole number of
// pages, with pvalloc(0) allocating exactly one page.
//
// Bug fix: RoundUpTo(size, PageSize) wraps around when size is within a
// page of UINTPTR_MAX; the wrapped value 0 then hit the "size == 0"
// branch and silently allocated a single page for an absurdly large
// request. Treat such overflow as out-of-memory instead, honoring the
// allocator's null-or-die policy.
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  } else {
    uptr rounded = RoundUpTo(size, PageSize);
    if (rounded < size)  // Rounding overflowed uptr.
      return instance.allocator.ReturnNullOrDie();
    size = rounded;
  }
  return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
}
// posix_memalign() shim: stores the allocation into *memptr and returns 0.
// NOTE(review): when allocation fails (allocator_may_return_null), ptr is
// null and IsAligned(0, x) holds, so this still writes nullptr and returns
// 0 rather than ENOMEM; likewise an invalid (non-power-of-two) alignment
// is caught by a CHECK inside Allocate rather than returning EINVAL --
// confirm this matches the interceptor's expectations.
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
}
// malloc_usable_size() shim; malloc_usable_size(nullptr) is 0 by contract.
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (!ptr) return 0;
  const uptr usable = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  // A live pointer always has a nonzero usable size; optionally report
  // queries on unowned or freed pointers.
  if (usable == 0 && flags()->check_malloc_usable_size) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned(reinterpret_cast<uptr>(ptr), &stack);
  }
  return usable;
}
// `mz` entry points are consumed by the malloc-zone glue; they expose the
// allocator's size query and global locking to the system allocator layer.
uptr asan_mz_size(const void *ptr) {
return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
void asan_mz_force_lock() {
instance.ForceLock();
}
void asan_mz_force_unlock() {
instance.ForceUnlock();
}
// Called by sanitizer_common when the soft RSS limit is crossed; makes the
// allocator start failing (or dying on) further allocations.
void AsanSoftRssLimitExceededCallback(bool exceeded) {
instance.allocator.SetRssLimitIsExceeded(exceeded);
}
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
// LSan stops the world before scanning; expose the ASan allocator's
// global lock for that purpose.
void LockAllocator() {
__asan::get_allocator().ForceLock();
}
void UnlockAllocator() {
__asan::get_allocator().ForceUnlock();
}
// Reports the address range occupied by the allocator object itself so
// the leak checker can skip the allocator's internal pointers.
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*begin = (uptr)&__asan::get_allocator();
*end = *begin + sizeof(__asan::get_allocator());
}
// Resolves p to a live heap chunk; returns the chunk's user-begin address
// when p points inside it (or matches the operator-new(0) special case),
// and 0 otherwise. Runs with the allocator locked.
uptr PointsIntoChunk(void* p) {
  const uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *chunk =
      __asan::instance.GetAsanChunkByAddrFastLocked(addr);
  if (!chunk) return 0;
  const uptr user_beg = chunk->Beg();
  if (chunk->chunk_state != __asan::CHUNK_ALLOCATED) return 0;
  if (chunk->AddrIsInside(addr, /*locked_version=*/true))
    return user_beg;
  const uptr user_size = chunk->UsedSize(/*locked_version*/ true);
  if (IsSpecialCaseOfOperatorNew0(user_beg, user_size, addr))
    return user_beg;
  return 0;
}
// Translates a block start handed out by the leak checker back into the
// chunk header and returns the user-region begin address.
uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *header =
      __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
  CHECK(header);
  return header->Beg();
}
// LsanMetadata gives the leak checker a uniform view of chunk metadata.
// For ASan the metadata *is* the chunk header, located kChunkHeaderSize
// bytes before the user pointer.
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}
bool LsanMetadata::allocated() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->chunk_state == __asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return static_cast<ChunkTag>(m->lsan_tag);
}
void LsanMetadata::set_tag(ChunkTag value) {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
m->lsan_tag = value;
}
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
// locked_version: the allocator is held locked during leak checking.
return m->UsedSize(/*locked_version=*/true);
}
u32 LsanMetadata::stack_trace_id() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->alloc_context_id;
}
// Iterates over every chunk known to the allocator (live or not).
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
__asan::get_allocator().ForEachChunk(callback, arg);
}
// Marks the live heap object containing p so the leak checker skips it.
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  const uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
  // Not a chunk, already freed, or addr outside the user region.
  if (!m || m->chunk_state != __asan::CHUNK_ALLOCATED ||
      !m->AddrIsInside(addr))
    return kIgnoreObjectInvalid;
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
// Returns nonzero iff p is a live pointer to the start of a chunk owned
// by this allocator.
int __sanitizer_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return instance.AllocationSize(ptr) > 0;
}
// Returns the user-requested size of p, or 0 for nullptr. Reports a fatal
// error when p is not owned by the allocator or has already been freed.
uptr __sanitizer_get_allocated_size(const void *p) {
if (!p) return 0;
uptr ptr = reinterpret_cast<uptr>(p);
uptr allocated_size = instance.AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
  GET_STACK_TRACE_FATAL_HERE;
  ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
}
return allocated_size;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
// On platforms without weak-symbol support these definitions stand in for
// user-supplied hooks; elsewhere the weak attribute lets users override
// them at link time.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
} // extern "C"
#endif

View File

@ -7,12 +7,13 @@
// //
// This file is a part of AddressSanitizer, an address sanity checker. // This file is a part of AddressSanitizer, an address sanity checker.
// //
// ASan-private header for asan_allocator2.cc. // ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifndef ASAN_ALLOCATOR_H #ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H #define ASAN_ALLOCATOR_H
#include "asan_flags.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_interceptors.h" #include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator.h"
@ -26,11 +27,22 @@ enum AllocType {
FROM_NEW_BR = 3 // Memory block came from operator new [ ] FROM_NEW_BR = 3 // Memory block came from operator new [ ]
}; };
static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk; struct AsanChunk;
void InitializeAllocator(); struct AllocatorOptions {
void ReInitializeAllocator(); u32 quarantine_size_mb;
u16 min_redzone;
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
};
void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView { class AsanChunkView {
public: public:
@ -100,6 +112,11 @@ struct AsanMapUnmapCallback {
# if defined(__powerpc64__) # if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL; const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T. const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
# elif defined(__aarch64__)
// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
// so no need to different values for different VMA.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
# else # else
const uptr kAllocatorSpace = 0x600000000000ULL; const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T. const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
@ -122,15 +139,16 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
AsanMapUnmapCallback> PrimaryAllocator; AsanMapUnmapCallback> PrimaryAllocator;
#endif // SANITIZER_CAN_USE_ALLOCATOR64 #endif // SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator; typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator; SecondaryAllocator> AsanAllocator;
struct AsanThreadLocalMallocStorage { struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16]; uptr quarantine_cache[16];
AllocatorCache allocator2_cache; AllocatorCache allocator_cache;
void CommitBack(); void CommitBack();
private: private:
// These objects are allocated via mmap() and are zero-initialized. // These objects are allocated via mmap() and are zero-initialized.
@ -158,6 +176,7 @@ void asan_mz_force_lock();
void asan_mz_force_unlock(); void asan_mz_force_unlock();
void PrintInternalAllocatorStats(); void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);
} // namespace __asan } // namespace __asan
#endif // ASAN_ALLOCATOR_H #endif // ASAN_ALLOCATOR_H

View File

@ -1,790 +0,0 @@
//===-- asan_allocator2.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"
namespace __asan {
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
PoisonShadow(p, size, 0);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
FlushUnneededASanShadowMemory(p, size);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.munmaps++;
thread_stats.munmaped += size;
}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
return &ms->allocator2_cache;
}
static Allocator allocator;
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
CHUNK_ALLOCATED = 2,
CHUNK_QUARANTINE = 3
};
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
CHECK_LT(rz_log, 8);
return 16 << rz_log;
}
static u32 RZSize2Log(u32 rz_size) {
CHECK_GE(rz_size, 16);
CHECK_LE(rz_size, 2048);
CHECK(IsPowerOfTwo(rz_size));
u32 res = Log2(rz_size) - 4;
CHECK_EQ(rz_size, RZLog2Size(res));
return res;
}
static uptr ComputeRZLog(uptr user_requested_size) {
u32 rz_log =
user_requested_size <= 64 - 16 ? 0 :
user_requested_size <= 128 - 32 ? 1 :
user_requested_size <= 512 - 64 ? 2 :
user_requested_size <= 4096 - 128 ? 3 :
user_requested_size <= (1 << 14) - 256 ? 4 :
user_requested_size <= (1 << 15) - 512 ? 5 :
user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
RZSize2Log(flags()->max_redzone));
}
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
// U -- user memory.
// R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L H H U U U U U U
// | ^
// ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
struct ChunkHeader {
// 1-st 8 bytes.
u32 chunk_state : 8; // Must be first.
u32 alloc_tid : 24;
u32 free_tid : 24;
u32 from_memalign : 1;
u32 alloc_type : 2;
u32 rz_log : 3;
u32 lsan_tag : 2;
// 2-nd 8 bytes
// This field is used for small sizes. For large sizes it is equal to
// SizeClassMap::kMaxSize and the actual size is stored in the
// SecondaryAllocator's metadata.
u32 user_requested_size;
u32 alloc_context_id;
};
struct ChunkBase : ChunkHeader {
// Header2, intersects with user memory.
u32 free_context_id;
};
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
struct AsanChunk: ChunkBase {
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
uptr UsedSize(bool locked_version = false) {
if (user_requested_size != SizeClassMap::kMaxSize)
return user_requested_size;
return *reinterpret_cast<uptr *>(
allocator.GetMetaData(AllocBeg(locked_version)));
}
void *AllocBeg(bool locked_version = false) {
if (from_memalign) {
if (locked_version)
return allocator.GetBlockBeginFastLocked(
reinterpret_cast<void *>(this));
return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
}
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
bool AddrIsInside(uptr addr, bool locked_version = false) {
return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
}
};
bool AsanChunkView::IsValid() {
return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
StackTrace res = StackDepotGet(id);
CHECK(res.trace);
return res;
}
StackTrace AsanChunkView::GetAllocStack() {
return GetStackTraceFromId(chunk_->alloc_context_id);
}
StackTrace AsanChunkView::GetFreeStack() {
return GetStackTraceFromId(chunk_->free_context_id);
}
struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
struct QuarantineCallback {
explicit QuarantineCallback(AllocatorCache *cache)
: cache_(cache) {
}
void Recycle(AsanChunk *m) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
CHECK_NE(m->alloc_tid, kInvalidTid);
CHECK_NE(m->free_tid, kInvalidTid);
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapLeftRedzoneMagic);
void *p = reinterpret_cast<void *>(m->AllocBeg());
if (p != m) {
uptr *alloc_magic = reinterpret_cast<uptr *>(p);
CHECK_EQ(alloc_magic[0], kAllocBegMagic);
// Clear the magic value, as allocator internals may overwrite the
// contents of deallocated chunk, confusing GetAsanChunk lookup.
alloc_magic[0] = 0;
CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
}
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.real_frees++;
thread_stats.really_freed += m->UsedSize();
allocator.Deallocate(cache_, p);
}
void *Allocate(uptr size) {
return allocator.Allocate(cache_, size, 1, false);
}
void Deallocate(void *p) {
allocator.Deallocate(cache_, p);
}
AllocatorCache *cache_;
};
void InitializeAllocator() {
allocator.Init();
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
void ReInitializeAllocator() {
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type, bool can_fill) {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
if (alignment < min_alignment)
alignment = min_alignment;
if (size == 0) {
// We'd be happy to avoid allocating memory for zero-size requests, but
// some programs/tests depend on this behavior and assume that malloc would
// not return NULL even for zero-size allocations. Moreover, it looks like
// operator new should never return NULL, and results of consecutive "new"
// calls must be different even if the allocated size is zero.
size = 1;
}
CHECK(IsPowerOfTwo(alignment));
uptr rz_log = ComputeRZLog(size);
uptr rz_size = RZLog2Size(rz_log);
uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
bool using_primary_allocator = true;
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
needed_size += rz_size;
using_primary_allocator = false;
}
CHECK(IsAligned(needed_size, min_alignment));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
(void*)size);
return AllocatorReturnNull();
}
AsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocated = allocator.Allocate(cache, needed_size, 8, false);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8, false);
}
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
// chunk. This is possible if flags()->poison_heap was disabled for some
// time, for example, due to flags()->start_disabled.
// Anyway, poison the block before using it for anything else.
uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
}
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
uptr user_beg = beg_plus_redzone;
if (!IsAligned(user_beg, alignment))
user_beg = RoundUpTo(user_beg, alignment);
uptr user_end = user_beg + size;
CHECK_LE(user_end, alloc_end);
uptr chunk_beg = user_beg - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
m->alloc_type = alloc_type;
m->rz_log = rz_log;
u32 alloc_tid = t ? t->tid() : 0;
m->alloc_tid = alloc_tid;
CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
m->free_tid = kInvalidTid;
m->from_memalign = user_beg != beg_plus_redzone;
if (alloc_beg != chunk_beg) {
CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
}
if (using_primary_allocator) {
CHECK(size);
m->user_requested_size = size;
CHECK(allocator.FromPrimary(allocated));
} else {
CHECK(!allocator.FromPrimary(allocated));
m->user_requested_size = SizeClassMap::kMaxSize;
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
meta[0] = size;
meta[1] = chunk_beg;
}
m->alloc_context_id = StackDepotPut(*stack);
uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
if (size_rounded_down_to_granularity)
PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
// Deal with the end of the region if size is not aligned to granularity.
if (size != size_rounded_down_to_granularity && fl.poison_heap) {
u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
*shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
}
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mallocs++;
thread_stats.malloced += size;
thread_stats.malloced_redzones += needed_size - size;
uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
thread_stats.malloced_by_size[class_id]++;
if (needed_size > SizeClassMap::kMaxSize)
thread_stats.malloc_large++;
void *res = reinterpret_cast<void *>(user_beg);
if (can_fill && fl.max_malloc_fill_size) {
uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
REAL(memset)(res, fl.malloc_fill_byte, fill_size);
}
#if CAN_SANITIZE_LEAKS
m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
: __lsan::kDirectlyLeaked;
#endif
// Must be the last mutation of metadata in this function.
atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
ASAN_MALLOC_HOOK(res, size);
return res;
}
static void ReportInvalidFree(void *ptr, u8 chunk_state,
BufferedStackTrace *stack) {
if (chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
else
ReportFreeNotMalloced((uptr)ptr, stack);
}
static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
CHUNK_QUARANTINE, memory_order_acquire))
ReportInvalidFree(ptr, old_chunk_state, stack);
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
ReportAllocTypeMismatch((uptr)ptr, stack,
(AllocType)m->alloc_type, (AllocType)alloc_type);
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
CHECK_EQ(m->free_tid, kInvalidTid);
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
m->free_context_id = StackDepotPut(*stack);
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapFreeMagic);
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.frees++;
thread_stats.freed += m->UsedSize();
// Push into quarantine.
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
AllocatorCache *ac = GetAllocatorCache(ms);
quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
m, m->UsedSize());
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *ac = &fallback_allocator_cache;
quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
m, m->UsedSize());
}
}
// Common deallocation path for free(), delete and sized delete.
// delete_size is 0 when the caller does not know the allocation size
// (plain free/delete); otherwise it is checked against the recorded size.
static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  uptr p = reinterpret_cast<uptr>(ptr);
  if (p == 0) return;  // free(NULL) is a no-op.
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  // NOTE(review): this reads m->UsedSize() before the chunk is atomically
  // flagged as quarantined below, so a racing double-free could be reported
  // as a size mismatch first — confirm whether this ordering is intended.
  if (delete_size && flags()->new_delete_type_mismatch &&
      delete_size != m->UsedSize()) {
    ReportNewDeleteSizeMismatch(p, delete_size, stack);
  }
  ASAN_FREE_HOOK(ptr);
  // Must mark the chunk as quarantined before any changes to its metadata.
  AtomicallySetQuarantineFlag(m, ptr, stack);
  QuarantineChunk(m, ptr, stack, alloc_type);
}
// realloc() core: allocate a new block, copy min(old, new) bytes, then
// deallocate the old block. Callers handle the NULL-pointer and zero-size
// special cases, so both old_ptr and new_size are known to be non-zero here.
static void *Reallocate(void *old_ptr, uptr new_size,
                        BufferedStackTrace *stack) {
  CHECK(old_ptr && new_size);
  uptr p = reinterpret_cast<uptr>(old_ptr);
  uptr chunk_beg = p - kChunkHeaderSize;
  AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
  AsanStats &thread_stats = GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;
  void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
  if (new_ptr) {
    // Reject realloc() of a pointer that is not currently allocated
    // (double-free / wild pointer).
    u8 chunk_state = m->chunk_state;
    if (chunk_state != CHUNK_ALLOCATED)
      ReportInvalidFree(old_ptr, chunk_state, stack);
    CHECK_NE(REAL(memcpy), (void*)0);
    uptr memcpy_size = Min(new_size, m->UsedSize());
    // If realloc() races with free(), we may start copying freed memory.
    // However, we will report racy double-free later anyway.
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, 0, stack, FROM_MALLOC);
  }
  // On allocation failure the old block is left untouched, matching
  // realloc() semantics.
  return new_ptr;
}
// Translates the beginning of an allocator block into the AsanChunk header
// describing the allocation inside it (or the block itself if the header is
// stored in-place).
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
static AsanChunk *GetAsanChunk(void *alloc_beg) {
  if (!alloc_beg) return 0;
  if (!allocator.FromPrimary(alloc_beg)) {
    // Secondary (mmap-backed) blocks keep a pointer to the chunk header in
    // the allocator metadata (second word).
    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
    AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
    return m;
  }
  // Primary blocks: if the block starts with kAllocBegMagic, the next word
  // holds the chunk pointer; otherwise the chunk header sits at the very
  // beginning of the block.
  uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
  if (alloc_magic[0] == kAllocBegMagic)
    return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
  return reinterpret_cast<AsanChunk *>(alloc_beg);
}
// Finds the AsanChunk covering an arbitrary address, or null if the address
// does not belong to any allocator block.
static AsanChunk *GetAsanChunkByAddr(uptr p) {
  void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}

// Same as GetAsanChunkByAddr, but uses the lock-free block lookup.
// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
  void *alloc_beg =
      allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
  return GetAsanChunk(alloc_beg);
}
// Returns the user-requested size of the live allocation starting exactly at
// p, or 0 if p is not the start of a currently-allocated chunk.
static uptr AllocationSize(uptr p) {
  AsanChunk *m = GetAsanChunkByAddr(p);
  if (!m || m->chunk_state != CHUNK_ALLOCATED || m->Beg() != p)
    return 0;
  return m->UsedSize();
}
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
  // Preference ladder when the states differ:
  // allocated chunk > quarantined (freed) chunk > available chunk.
  const u8 l_state = left_chunk->chunk_state;
  const u8 r_state = right_chunk->chunk_state;
  if (l_state != r_state) {
    if (l_state == CHUNK_ALLOCATED || r_state == CHUNK_ALLOCATED)
      return l_state == CHUNK_ALLOCATED ? left_chunk : right_chunk;
    if (l_state == CHUNK_QUARANTINE || r_state == CHUNK_QUARANTINE)
      return l_state == CHUNK_QUARANTINE ? left_chunk : right_chunk;
  }
  // Same chunk_state: pick the chunk whose boundary is closer to addr.
  sptr l_offset = 0, r_offset = 0;
  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
  return (l_offset < r_offset) ? left_chunk : right_chunk;
}
// Finds the heap chunk best describing addr for error reporting. If addr is
// in a chunk's left redzone, it may actually be a right overflow from the
// preceding chunk, so we also look to the left and pick the better match.
AsanChunkView FindHeapChunkByAddress(uptr addr) {
  AsanChunk *m1 = GetAsanChunkByAddr(addr);
  if (!m1) return AsanChunkView(m1);
  sptr offset = 0;
  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
    // The address is in the chunk's left redzone, so maybe it is actually
    // a right buffer overflow from the other chunk to the left.
    // Search a bit to the left to see if there is another chunk.
    AsanChunk *m2 = 0;
    // Scan at most one page leftwards for a different chunk.
    for (uptr l = 1; l < GetPageSizeCached(); l++) {
      m2 = GetAsanChunkByAddr(addr - l);
      if (m2 == m1) continue;  // Still the same chunk.
      break;
    }
    // Only prefer the left chunk if addr really lies to its right.
    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
      m1 = ChooseChunk(addr, m2, m1);
  }
  return AsanChunkView(m1);
}
void AsanThreadLocalMallocStorage::CommitBack() {
AllocatorCache *ac = GetAllocatorCache(this);
quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
allocator.SwallowCache(GetAllocatorCache(this));
}
// Dumps the underlying allocator's statistics (debugging aid).
void PrintInternalAllocatorStats() {
  allocator.PrintStats();
}
// Thin interceptor-facing wrappers around Allocate()/Deallocate().

// memalign()/aligned allocation entry point.
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                    AllocType alloc_type) {
  return Allocate(size, alignment, stack, alloc_type, true);
}

// free()/delete entry point (size unknown, hence delete_size == 0).
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  Deallocate(ptr, 0, stack, alloc_type);
}

// Sized operator delete entry point; size is validated in Deallocate().
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type) {
  Deallocate(ptr, size, stack, alloc_type);
}

// malloc() entry point; 8 is the default alignment.
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
  return Allocate(size, 8, stack, FROM_MALLOC, true);
}
// calloc() entry point: overflow-checked nmemb*size allocation, returned
// memory zeroed.
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
    return AllocatorReturnNull();
  const uptr total_size = nmemb * size;  // Overflow excluded above.
  void *ptr = Allocate(total_size, 8, stack, FROM_MALLOC, false);
  // If the memory comes from the secondary allocator no need to clear it
  // as it comes directly from mmap.
  if (ptr && allocator.FromPrimary(ptr))
    REAL(memset)(ptr, 0, total_size);
  return ptr;
}
// realloc() entry point, implementing the standard special cases.
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  // realloc(NULL, size) behaves like malloc(size).
  if (!p)
    return Allocate(size, 8, stack, FROM_MALLOC, true);
  // realloc(p, 0) behaves like free(p).
  if (!size) {
    Deallocate(p, 0, stack, FROM_MALLOC);
    return 0;
  }
  return Reallocate(p, size, stack);
}
// valloc() entry point: page-aligned allocation.
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}
// pvalloc() entry point: page-aligned allocation with the size rounded up
// to a whole number of pages; pvalloc(0) allocates one page.
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
  const uptr page_size = GetPageSizeCached();
  uptr rounded_size = RoundUpTo(size, page_size);
  if (rounded_size == 0)
    rounded_size = page_size;
  return Allocate(rounded_size, page_size, stack, FROM_MALLOC, true);
}
// posix_memalign() entry point.
// NOTE(review): on allocation failure this still stores the (null) pointer
// and returns 0, whereas POSIX requires returning ENOMEM and leaving
// *memptr untouched — confirm against the interceptor's expectations.
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        BufferedStackTrace *stack) {
  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}
// malloc_usable_size() entry point. Returns 0 for NULL; reports an error
// (with the caller's pc/bp for the stack trace) when the pointer is not a
// live allocation and the check is enabled.
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
  if (ptr == 0) return 0;
  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    GET_STACK_TRACE_FATAL(pc, bp);
    ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
  }
  return usable_size;
}
// Malloc-zone size query (OS X malloc zone support).
uptr asan_mz_size(const void *ptr) {
  return AllocationSize(reinterpret_cast<uptr>(ptr));
}

// Lock the allocator and the fallback cache, in that order.
void asan_mz_force_lock() {
  allocator.ForceLock();
  fallback_mutex.Lock();
}

// Unlock in the reverse order of asan_mz_force_lock() to avoid deadlock.
void asan_mz_force_unlock() {
  fallback_mutex.Unlock();
  allocator.ForceUnlock();
}
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
// Locks the ASan allocator so LSan can walk it safely.
void LockAllocator() {
  __asan::allocator.ForceLock();
}

// Releases the lock taken by LockAllocator().
void UnlockAllocator() {
  __asan::allocator.ForceUnlock();
}

// Reports the address range occupied by the allocator object itself, so the
// leak scanner can exclude it from the root set.
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&__asan::allocator;
  *end = *begin + sizeof(__asan::allocator);
}
// If p points into a live (allocated) chunk, returns the chunk's user-data
// begin address; otherwise returns 0. Used by LSan during pointer scanning
// (allocator is locked, hence the FastLocked lookup and locked_version
// accessors).
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
  if (!m) return 0;
  uptr chunk = m->Beg();
  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
    return 0;
  if (m->AddrIsInside(addr, /*locked_version=*/true))
    return chunk;
  // Also treat a pointer one-past-the-end of an operator new(0) allocation
  // as pointing into the chunk (see IsSpecialCaseOfOperatorNew0).
  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
                                  addr))
    return chunk;
  return 0;
}
// Maps any address inside a chunk to the chunk's user-data begin address.
// Allocator must be locked (FastLocked lookup).
uptr GetUserBegin(uptr chunk) {
  __asan::AsanChunk *m =
      __asan::GetAsanChunkByAddrFastLocked(chunk);
  CHECK(m);
  return m->Beg();
}
// LsanMetadata accessors: LSan views the AsanChunk header (which sits
// kChunkHeaderSize bytes before the user data) as its per-chunk metadata.
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}

// True if the chunk is currently allocated (not freed/quarantined).
bool LsanMetadata::allocated() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->chunk_state == __asan::CHUNK_ALLOCATED;
}

// LSan leak-classification tag stored in the chunk header.
ChunkTag LsanMetadata::tag() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return static_cast<ChunkTag>(m->lsan_tag);
}

void LsanMetadata::set_tag(ChunkTag value) {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  m->lsan_tag = value;
}

// User-requested allocation size (allocator is locked during leak checks).
uptr LsanMetadata::requested_size() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->UsedSize(/*locked_version=*/true);
}

// StackDepot id of the allocation stack trace.
u32 LsanMetadata::stack_trace_id() const {
  __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
  return m->alloc_context_id;
}
// Invokes callback(chunk, arg) for every chunk known to the allocator.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  __asan::allocator.ForEachChunk(callback, arg);
}
// Marks the live heap object containing p as ignored for leak detection.
// Returns a status distinguishing success, invalid pointer, and
// already-ignored objects.
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  uptr addr = reinterpret_cast<uptr>(p);
  __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
  // Must be a live allocation and addr must fall within its user data.
  if (!m || m->chunk_state != __asan::CHUNK_ALLOCATED ||
      !m->AddrIsInside(addr))
    return kIgnoreObjectInvalid;
  if (m->lsan_tag == kIgnored)
    return kIgnoreObjectAlreadyIgnored;
  m->lsan_tag = __lsan::kIgnored;
  return kIgnoreObjectSuccess;
}
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  // ASan adds redzones, but those are intentionally not exposed here.
  return size;
}
int __sanitizer_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return (AllocationSize(ptr) > 0);
}
// Returns the user-requested size of the allocation at p; NULL yields 0.
uptr __sanitizer_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr ptr = reinterpret_cast<uptr>(p);
  uptr allocated_size = AllocationSize(ptr);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
  }
  return allocated_size;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
// On platforms with weak-symbol support the user may override these; here
// they are plain weak definitions that do nothing.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif

View File

@ -79,8 +79,8 @@ void AsanLocateAddress(uptr addr, AddressDescription *descr) {
GetInfoForHeapAddress(addr, descr); GetInfoForHeapAddress(addr, descr);
} }
uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id, static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
bool alloc_stack) { bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr); AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0; if (!chunk.IsValid()) return 0;
@ -106,14 +106,14 @@ uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
return 0; return 0;
} }
} // namespace __asan } // namespace __asan
using namespace __asan; using namespace __asan;
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_locate_address(uptr addr, char *name, uptr name_size, const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
uptr *region_address, uptr *region_size) { uptr *region_address, uptr *region_size) {
AddressDescription descr = { name, name_size, 0, 0, 0 }; AddressDescription descr = { name, name_size, 0, 0, nullptr };
AsanLocateAddress(addr, &descr); AsanLocateAddress(addr, &descr);
if (region_address) *region_address = descr.region_address; if (region_address) *region_address = descr.region_address;
if (region_size) *region_size = descr.region_size; if (region_size) *region_size = descr.region_size;

View File

@ -9,6 +9,7 @@
// //
// FakeStack is used to detect use-after-return bugs. // FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "asan_allocator.h" #include "asan_allocator.h"
#include "asan_poisoning.h" #include "asan_poisoning.h"
#include "asan_thread.h" #include "asan_thread.h"
@ -20,13 +21,19 @@ static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2; static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4; static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;
// For small size classes inline PoisonShadow for better performance. // For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3. CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr)); u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (class_id <= 6) { if (class_id <= 6) {
for (uptr i = 0; i < (1U << class_id); i++) for (uptr i = 0; i < (1U << class_id); i++) {
shadow[i] = magic; shadow[i] = magic;
// Make sure this does not become memset.
SanitizerBreakOptimization(nullptr);
}
} else { } else {
// The size class is too big, it's cheaper to poison only size bytes. // The size class is too big, it's cheaper to poison only size bytes.
PoisonShadow(ptr, size, static_cast<u8>(magic)); PoisonShadow(ptr, size, static_cast<u8>(magic));
@ -56,7 +63,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
void FakeStack::Destroy(int tid) { void FakeStack::Destroy(int tid) {
PoisonAll(0); PoisonAll(0);
if (common_flags()->verbosity >= 2) { if (Verbosity() >= 2) {
InternalScopedString str(kNumberOfSizeClasses * 50); InternalScopedString str(kNumberOfSizeClasses * 50);
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id], str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
@ -73,7 +80,9 @@ void FakeStack::PoisonAll(u8 magic) {
magic); magic);
} }
#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
uptr real_stack) { uptr real_stack) {
CHECK_LT(class_id, kNumberOfSizeClasses); CHECK_LT(class_id, kNumberOfSizeClasses);
@ -99,7 +108,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
*SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos]; *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
return res; return res;
} }
return 0; // We are out of fake stack. return nullptr; // We are out of fake stack.
} }
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) { uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
@ -176,7 +185,7 @@ void SetTLSFakeStack(FakeStack *fs) { }
static FakeStack *GetFakeStack() { static FakeStack *GetFakeStack() {
AsanThread *t = GetCurrentThread(); AsanThread *t = GetCurrentThread();
if (!t) return 0; if (!t) return nullptr;
return t->fake_stack(); return t->fake_stack();
} }
@ -184,40 +193,39 @@ static FakeStack *GetFakeStackFast() {
if (FakeStack *fs = GetTLSFakeStack()) if (FakeStack *fs = GetTLSFakeStack())
return fs; return fs;
if (!__asan_option_detect_stack_use_after_return) if (!__asan_option_detect_stack_use_after_return)
return 0; return nullptr;
return GetFakeStack(); return GetFakeStack();
} }
ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) { ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFast(); FakeStack *fs = GetFakeStackFast();
if (!fs) return real_stack; if (!fs) return 0;
uptr local_stack;
uptr real_stack = reinterpret_cast<uptr>(&local_stack);
FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack); FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
if (!ff) if (!ff) return 0; // Out of fake stack.
return real_stack; // Out of fake stack, return the real one.
uptr ptr = reinterpret_cast<uptr>(ff); uptr ptr = reinterpret_cast<uptr>(ff);
SetShadow(ptr, size, class_id, 0); SetShadow(ptr, size, class_id, 0);
return ptr; return ptr;
} }
ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) { ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
if (ptr == real_stack)
return;
FakeStack::Deallocate(ptr, class_id); FakeStack::Deallocate(ptr, class_id);
SetShadow(ptr, size, class_id, kMagic8); SetShadow(ptr, size, class_id, kMagic8);
} }
} // namespace __asan } // namespace __asan
// ---------------------- Interface ---------------- {{{1 // ---------------------- Interface ---------------- {{{1
using namespace __asan; using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \ #define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
__asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \ __asan_stack_malloc_##class_id(uptr size) { \
return OnMalloc(class_id, size, real_stack); \ return OnMalloc(class_id, size); \
} \ } \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
uptr ptr, uptr size, uptr real_stack) { \ uptr ptr, uptr size) { \
OnFree(ptr, class_id, size, real_stack); \ OnFree(ptr, class_id, size); \
} }
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0) DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
@ -239,15 +247,35 @@ SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end) { void **end) {
FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack); FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
if (!fs) return 0; if (!fs) return nullptr;
uptr frame_beg, frame_end; uptr frame_beg, frame_end;
FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack( FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
reinterpret_cast<uptr>(addr), &frame_beg, &frame_end)); reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
if (!frame) return 0; if (!frame) return nullptr;
if (frame->magic != kCurrentStackFrameMagic) if (frame->magic != kCurrentStackFrameMagic)
return 0; return nullptr;
if (beg) *beg = reinterpret_cast<void*>(frame_beg); if (beg) *beg = reinterpret_cast<void*>(frame_beg);
if (end) *end = reinterpret_cast<void*>(frame_end); if (end) *end = reinterpret_cast<void*>(frame_end);
return reinterpret_cast<void*>(frame->real_stack); return reinterpret_cast<void*>(frame->real_stack);
} }
} // extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
uptr PartialRzAddr = addr + size;
uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
FastPoisonShadowPartialRightRedzone(
PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
if ((!top) || (top > bottom)) return;
REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
(bottom - top) / SHADOW_GRANULARITY);
}
} // extern "C"

View File

@ -0,0 +1,177 @@
//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan flag parsing logic.
//===----------------------------------------------------------------------===//
#include "asan_activation.h"
#include "asan_flags.h"
#include "asan_interface_internal.h"
#include "asan_stack.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "ubsan/ubsan_flags.h"
#include "ubsan/ubsan_platform.h"
namespace __asan {
Flags asan_flags_dont_use_directly; // use via flags().
static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
}
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
#else
return "";
#endif
}
void Flags::SetDefaults() {
#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "asan_flags.inc"
#undef ASAN_FLAG
}
static void RegisterAsanFlags(FlagParser *parser, Flags *f) {
#define ASAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "asan_flags.inc"
#undef ASAN_FLAG
}
void InitializeFlags() {
// Set the default values and prepare for parsing ASan and common flags.
SetCommonFlagsDefaults();
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.detect_leaks = CAN_SANITIZE_LEAKS;
cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf.malloc_context_size = kDefaultMallocContextSize;
cf.intercept_tls_get_addr = true;
cf.exitcode = 1;
OverrideCommonFlags(cf);
}
Flags *f = flags();
f->SetDefaults();
FlagParser asan_parser;
RegisterAsanFlags(&asan_parser, f);
RegisterCommonFlags(&asan_parser);
// Set the default values and prepare for parsing LSan and UBSan flags
// (which can also overwrite common flags).
#if CAN_SANITIZE_LEAKS
__lsan::Flags *lf = __lsan::flags();
lf->SetDefaults();
FlagParser lsan_parser;
__lsan::RegisterLsanFlags(&lsan_parser, lf);
RegisterCommonFlags(&lsan_parser);
#endif
#if CAN_SANITIZE_UB
__ubsan::Flags *uf = __ubsan::flags();
uf->SetDefaults();
FlagParser ubsan_parser;
__ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
RegisterCommonFlags(&ubsan_parser);
#endif
// Override from ASan compile definition.
const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
asan_parser.ParseString(asan_compile_def);
// Override from user-specified string.
const char *asan_default_options = MaybeCallAsanDefaultOptions();
asan_parser.ParseString(asan_default_options);
#if CAN_SANITIZE_UB
const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
ubsan_parser.ParseString(ubsan_default_options);
#endif
// Override from command line.
asan_parser.ParseString(GetEnv("ASAN_OPTIONS"));
#if CAN_SANITIZE_LEAKS
lsan_parser.ParseString(GetEnv("LSAN_OPTIONS"));
#endif
#if CAN_SANITIZE_UB
ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
#endif
// Let activation flags override current settings. On Android they come
// from a system property. On other platforms this is no-op.
if (!flags()->start_deactivated) {
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
asan_parser.ParseString(buf);
}
SetVerbosity(common_flags()->verbosity);
// TODO(eugenis): dump all flags at verbosity>=2?
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) {
// TODO(samsonov): print all of the flags (ASan, LSan, common).
asan_parser.PrintFlagDescriptions();
}
// Flag validation:
if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
Report("%s: detect_leaks is not supported on this platform.\n",
SanitizerToolName);
Die();
}
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
if (f->strict_init_order) {
f->check_initialization_order = true;
}
CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
CHECK_GE(f->redzone, 16);
CHECK_GE(f->max_redzone, f->redzone);
CHECK_LE(f->max_redzone, 2048);
CHECK(IsPowerOfTwo(f->redzone));
CHECK(IsPowerOfTwo(f->max_redzone));
// quarantine_size is deprecated but we still honor it.
// quarantine_size can not be used together with quarantine_size_mb.
if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) {
Report("%s: please use either 'quarantine_size' (deprecated) or "
"quarantine_size_mb, but not both\n", SanitizerToolName);
Die();
}
if (f->quarantine_size >= 0)
f->quarantine_size_mb = f->quarantine_size >> 20;
if (f->quarantine_size_mb < 0) {
const int kDefaultQuarantineSizeMb =
(ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
}
} // namespace __asan
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __asan_default_options() { return ""; }
} // extern "C"
#endif

View File

@ -14,6 +14,7 @@
#define ASAN_FLAGS_H #define ASAN_FLAGS_H
#include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
// ASan flag values can be defined in four ways: // ASan flag values can be defined in four ways:
// 1) initialized with default values at startup. // 1) initialized with default values at startup.
@ -22,55 +23,24 @@
// 3) overriden from string returned by user-specified function // 3) overriden from string returned by user-specified function
// __asan_default_options(). // __asan_default_options().
// 4) overriden from env variable ASAN_OPTIONS. // 4) overriden from env variable ASAN_OPTIONS.
// 5) overriden during ASan activation (for now used on Android only).
namespace __asan { namespace __asan {
struct Flags { struct Flags {
// Flag descriptions are in asan_rtl.cc. #define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
int quarantine_size; #include "asan_flags.inc"
int redzone; #undef ASAN_FLAG
int max_redzone;
bool debug; void SetDefaults();
int report_globals;
bool check_initialization_order;
bool replace_str;
bool replace_intrin;
bool mac_ignore_invalid_free;
bool detect_stack_use_after_return;
int min_uar_stack_size_log;
int max_uar_stack_size_log;
bool uar_noreserve;
int max_malloc_fill_size, malloc_fill_byte;
int exitcode;
bool allow_user_poisoning;
int sleep_before_dying;
bool check_malloc_usable_size;
bool unmap_shadow_on_exit;
bool abort_on_error;
bool print_stats;
bool print_legend;
bool atexit;
bool allow_reexec;
bool print_full_thread_history;
bool poison_heap;
bool poison_partial;
bool poison_array_cookie;
bool alloc_dealloc_mismatch;
bool new_delete_type_mismatch;
bool strict_memcmp;
bool strict_init_order;
bool start_deactivated;
int detect_invalid_pointer_pairs;
bool detect_container_overflow;
int detect_odr_violation;
bool dump_instruction_bytes;
}; };
extern Flags asan_flags_dont_use_directly; extern Flags asan_flags_dont_use_directly;
inline Flags *flags() { inline Flags *flags() {
return &asan_flags_dont_use_directly; return &asan_flags_dont_use_directly;
} }
void InitializeFlags(Flags *f, const char *env);
void InitializeFlags();
} // namespace __asan } // namespace __asan

View File

@ -0,0 +1,134 @@
//===-- asan_flags.inc ------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// ASan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_FLAG
# error "Define ASAN_FLAG prior to including this file!"
#endif
// ASAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
ASAN_FLAG(int, quarantine_size, -1,
"Deprecated, please use quarantine_size_mb.")
ASAN_FLAG(int, quarantine_size_mb, -1,
"Size (in Mb) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.")
ASAN_FLAG(int, redzone, 16,
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.")
ASAN_FLAG(int, max_redzone, 2048,
"Maximal size (in bytes) of redzones around heap objects.")
ASAN_FLAG(
bool, debug, false,
"If set, prints some debugging information and does additional checks.")
ASAN_FLAG(
int, report_globals, 1,
"Controls the way to handle globals (0 - don't detect buffer overflow on "
"globals, 1 - detect buffer overflow, 2 - print data about registered "
"globals).")
ASAN_FLAG(bool, check_initialization_order, false,
"If set, attempts to catch initialization order issues.")
ASAN_FLAG(
bool, replace_str, true,
"If set, uses custom wrappers and replacements for libc string functions "
"to find more errors.")
ASAN_FLAG(bool, replace_intrin, true,
"If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
ASAN_FLAG(bool, mac_ignore_invalid_free, false,
"Ignore invalid free() calls to work around some bugs. Used on OS X "
"only.")
ASAN_FLAG(bool, detect_stack_use_after_return, false,
"Enables stack-use-after-return checking at run-time.")
ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
"Minimum fake stack size log.")
ASAN_FLAG(int, max_uar_stack_size_log,
20, // 1Mb per size class, i.e. ~11Mb per thread
"Maximum fake stack size log.")
ASAN_FLAG(bool, uar_noreserve, false,
"Use mmap with 'noreserve' flag to allocate fake stack.")
ASAN_FLAG(
    int, max_malloc_fill_size, 0x1000,  // By default, fill only the first 4K.
    "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
    "bytes that will be filled with malloc_fill_byte on malloc.")
ASAN_FLAG(int, malloc_fill_byte, 0xbe,
          "Value used to fill the newly allocated memory.")
ASAN_FLAG(bool, allow_user_poisoning, true,
          "If set, user may manually mark memory regions as poisoned or "
          "unpoisoned.")
ASAN_FLAG(
    int, sleep_before_dying, 0,
    "Number of seconds to sleep between printing an error report and "
    "terminating the program. Useful for debugging purposes (e.g. when one "
    "needs to attach gdb).")
ASAN_FLAG(bool, check_malloc_usable_size, true,
          "Allows the users to work around the bug in Nvidia drivers prior to "
          "295.*.")
ASAN_FLAG(bool, unmap_shadow_on_exit, false,
          "If set, explicitly unmaps the (huge) shadow at exit.")
ASAN_FLAG(bool, print_stats, false,
          "Print various statistics after printing an error message or if "
          "atexit=1.")
ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
ASAN_FLAG(bool, atexit, false,
          "If set, prints ASan exit stats even after program terminates "
          "successfully.")
ASAN_FLAG(
    bool, print_full_thread_history, true,
    "If set, prints thread creation stacks for the threads involved in the "
    "report and their ancestors up to the main thread.")
ASAN_FLAG(
    bool, poison_heap, true,
    "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
    "for benchmarking the allocator or instrumentator.")
ASAN_FLAG(bool, poison_partial, true,
          "If true, poison partially addressable 8-byte aligned words "
          "(default=true). This flag affects heap and global buffers, but not "
          "stack buffers.")
ASAN_FLAG(bool, poison_array_cookie, true,
          "Poison (or not) the array cookie after operator new[].")
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://code.google.com/p/address-sanitizer/issues/detail?id=131
// https://code.google.com/p/address-sanitizer/issues/detail?id=309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
          (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
          "Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
          "Report errors on mismatch between size of new and delete.")
ASAN_FLAG(
    bool, strict_init_order, false,
    "If true, assume that dynamic initializers can never access globals from "
    "other modules, even if the latter are already initialized.")
ASAN_FLAG(
    bool, start_deactivated, false,
    "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
    "poisoning) to reduce memory consumption as much as possible, and "
    "restores them to original values when the first instrumented module is "
    "loaded into the process. This is mainly intended to be used on "
    "Android. ")
ASAN_FLAG(
    int, detect_invalid_pointer_pairs, 0,
    "If non-zero, try to detect operations like <, <=, >, >= and - on "
    "invalid pointer pairs (e.g. when pointers belong to different objects). "
    "The bigger the value the harder we try.")
ASAN_FLAG(
    bool, detect_container_overflow, true,
    "If true, honor the container overflow annotations. "
    "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
ASAN_FLAG(int, detect_odr_violation, 2,
          "If >=2, detect violation of One-Definition-Rule (ODR); "
          "If ==1, detect ODR-violation only if the two variables "
          "have different sizes")
ASAN_FLAG(bool, dump_instruction_bytes, false,
          "If true, dump 16 bytes starting at the instruction that caused SEGV")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")

View File

@ -9,6 +9,7 @@
// //
// Handle globals. // Handle globals.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "asan_interceptors.h" #include "asan_interceptors.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_mapping.h" #include "asan_mapping.h"
@ -16,6 +17,7 @@
#include "asan_report.h" #include "asan_report.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "asan_stats.h" #include "asan_stats.h"
#include "asan_suppressions.h"
#include "asan_thread.h" #include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h" #include "sanitizer_common/sanitizer_mutex.h"
@ -71,7 +73,7 @@ ALWAYS_INLINE void PoisonRedZones(const Global &g) {
const uptr kMinimalDistanceFromAnotherGlobal = 64; const uptr kMinimalDistanceFromAnotherGlobal = 64;
bool IsAddressNearGlobal(uptr addr, const __asan_global &g) { static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false; if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
if (addr >= g.beg + g.size_with_redzone) return false; if (addr >= g.beg + g.size_with_redzone) return false;
return true; return true;
@ -88,46 +90,8 @@ static void ReportGlobal(const Global &g, const char *prefix) {
} }
} }
static bool DescribeOrGetInfoIfGlobal(uptr addr, uptr size, bool print, static u32 FindRegistrationSite(const Global *g) {
Global *output_global) { mu_for_globals.CheckLocked();
if (!flags()->report_globals) return false;
BlockingMutexLock lock(&mu_for_globals);
bool res = false;
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
if (print) {
if (flags()->report_globals >= 2)
ReportGlobal(g, "Search");
res |= DescribeAddressRelativeToGlobal(addr, size, g);
} else {
if (IsAddressNearGlobal(addr, g)) {
CHECK(output_global);
*output_global = g;
return true;
}
}
}
return res;
}
bool DescribeAddressIfGlobal(uptr addr, uptr size) {
return DescribeOrGetInfoIfGlobal(addr, size, /* print */ true,
/* output_global */ nullptr);
}
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
Global g = {};
if (DescribeOrGetInfoIfGlobal(addr, /* size */ 1, /* print */ false, &g)) {
internal_strncpy(descr->name, g.name, descr->name_size);
descr->region_address = g.beg;
descr->region_size = g.size;
descr->region_kind = "global";
return true;
}
return false;
}
u32 FindRegistrationSite(const Global *g) {
CHECK(global_registration_site_vector); CHECK(global_registration_site_vector);
for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) { for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) {
GlobalRegistrationSite &grs = (*global_registration_site_vector)[i]; GlobalRegistrationSite &grs = (*global_registration_site_vector)[i];
@ -137,6 +101,38 @@ u32 FindRegistrationSite(const Global *g) {
return 0; return 0;
} }
int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
int max_globals) {
if (!flags()->report_globals) return 0;
BlockingMutexLock lock(&mu_for_globals);
int res = 0;
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
if (flags()->report_globals >= 2)
ReportGlobal(g, "Search");
if (IsAddressNearGlobal(addr, g)) {
globals[res] = g;
if (reg_sites)
reg_sites[res] = FindRegistrationSite(&g);
res++;
if (res == max_globals) break;
}
}
return res;
}
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
Global g = {};
if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
internal_strncpy(descr->name, g.name, descr->name_size);
descr->region_address = g.beg;
descr->region_size = g.size;
descr->region_kind = "global";
return true;
}
return false;
}
// Register a global variable. // Register a global variable.
// This function may be called more than once for every global // This function may be called more than once for every global
// so we store the globals in a map. // so we store the globals in a map.
@ -148,9 +144,7 @@ static void RegisterGlobal(const Global *g) {
CHECK(AddrIsInMem(g->beg)); CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg)); CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
// This "ODR violation" detection is fundamentally incompatible with if (flags()->detect_odr_violation) {
// how GCC registers globals. Disable as useless until rewritten upstream.
if (0 && flags()->detect_odr_violation) {
// Try detecting ODR (One Definition Rule) violation, i.e. the situation // Try detecting ODR (One Definition Rule) violation, i.e. the situation
// where two globals with the same name are defined in different modules. // where two globals with the same name are defined in different modules.
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) { if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
@ -158,20 +152,21 @@ static void RegisterGlobal(const Global *g) {
// the entire redzone of the second global may be within the first global. // the entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg && if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size)) (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g), ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g)); l->g, FindRegistrationSite(l->g));
} }
} }
} }
if (flags()->poison_heap) if (CanPoisonMemory())
PoisonRedZones(*g); PoisonRedZones(*g);
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals; ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
l->g = g; l->g = g;
l->next = list_of_all_globals; l->next = list_of_all_globals;
list_of_all_globals = l; list_of_all_globals = l;
if (g->has_dynamic_init) { if (g->has_dynamic_init) {
if (dynamic_init_globals == 0) { if (!dynamic_init_globals) {
dynamic_init_globals = new(allocator_for_globals) dynamic_init_globals = new(allocator_for_globals)
VectorOfGlobals(kDynamicInitGlobalsInitialCapacity); VectorOfGlobals(kDynamicInitGlobalsInitialCapacity);
} }
@ -182,11 +177,13 @@ static void RegisterGlobal(const Global *g) {
static void UnregisterGlobal(const Global *g) { static void UnregisterGlobal(const Global *g) {
CHECK(asan_inited); CHECK(asan_inited);
if (flags()->report_globals >= 2)
ReportGlobal(*g, "Removed");
CHECK(flags()->report_globals); CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg)); CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg)); CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->poison_heap) if (CanPoisonMemory())
PoisonShadowForGlobal(g, 0); PoisonShadowForGlobal(g, 0);
// We unpoison the shadow memory for the global but we do not remove it from // We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list // the list because that would require O(n^2) time with the current list
@ -208,7 +205,7 @@ void StopInitOrderChecking() {
} }
} }
} // namespace __asan } // namespace __asan
// ---------------------- Interface ---------------- {{{1 // ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT using namespace __asan; // NOLINT
@ -216,7 +213,7 @@ using namespace __asan; // NOLINT
// Register an array of globals. // Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) { void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return; if (!flags()->report_globals) return;
GET_STACK_TRACE_FATAL_HERE; GET_STACK_TRACE_MALLOC;
u32 stack_id = StackDepotPut(stack); u32 stack_id = StackDepotPut(stack);
BlockingMutexLock lock(&mu_for_globals); BlockingMutexLock lock(&mu_for_globals);
if (!global_registration_site_vector) if (!global_registration_site_vector)
@ -249,7 +246,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// initializer can only touch global variables in the same TU. // initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) { void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order || if (!flags()->check_initialization_order ||
!flags()->poison_heap) !CanPoisonMemory())
return; return;
bool strict_init_order = flags()->strict_init_order; bool strict_init_order = flags()->strict_init_order;
CHECK(dynamic_init_globals); CHECK(dynamic_init_globals);
@ -275,7 +272,7 @@ void __asan_before_dynamic_init(const char *module_name) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals. // TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() { void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order || if (!flags()->check_initialization_order ||
!flags()->poison_heap) !CanPoisonMemory())
return; return;
CHECK(asan_inited); CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals); BlockingMutexLock lock(&mu_for_globals);

View File

@ -23,8 +23,10 @@ extern "C" {
// contains the function PC as the 3-rd field (see // contains the function PC as the 3-rd field (see
// DescribeAddressIfStack). // DescribeAddressIfStack).
// v3=>v4: added '__asan_global_source_location' to __asan_global. // v3=>v4: added '__asan_global_source_location' to __asan_global.
#define __asan_init __asan_init_v4 // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
#define __asan_init_name "__asan_init_v4" // __asan_stack_free_ functions.
// v5=>v6: changed the name of the version check symbol
#define __asan_version_mismatch_check __asan_version_mismatch_check_v6
} }
#endif // ASAN_INIT_VERSION_H #endif // ASAN_INIT_VERSION_H

View File

@ -9,8 +9,8 @@
// //
// Intercept various libc functions. // Intercept various libc functions.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
#include "asan_interceptors.h"
#include "asan_allocator.h" #include "asan_allocator.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_mapping.h" #include "asan_mapping.h"
@ -18,8 +18,19 @@
#include "asan_report.h" #include "asan_report.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "asan_stats.h" #include "asan_stats.h"
#include "asan_suppressions.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#if SANITIZER_POSIX
#include "sanitizer_common/sanitizer_posix.h"
#endif
#if defined(__i386) && SANITIZER_LINUX
#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
#elif defined(__mips__) && SANITIZER_LINUX
#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2"
#endif
namespace __asan { namespace __asan {
// Return true if we can quickly decide that the region is unpoisoned. // Return true if we can quickly decide that the region is unpoisoned.
@ -32,12 +43,16 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
return false; return false;
} }
struct AsanInterceptorContext {
const char *interceptor_name;
};
// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE, // We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
// and ASAN_WRITE_RANGE as macro instead of function so // and ASAN_WRITE_RANGE as macro instead of function so
// that no extra frames are created, and stack trace contains // that no extra frames are created, and stack trace contains
// relevant information only. // relevant information only.
// We check all shadow bytes. // We check all shadow bytes.
#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \ #define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
uptr __offset = (uptr)(offset); \ uptr __offset = (uptr)(offset); \
uptr __size = (uptr)(size); \ uptr __size = (uptr)(size); \
uptr __bad = 0; \ uptr __bad = 0; \
@ -47,13 +62,33 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
} \ } \
if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \ if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
(__bad = __asan_region_is_poisoned(__offset, __size))) { \ (__bad = __asan_region_is_poisoned(__offset, __size))) { \
GET_CURRENT_PC_BP_SP; \ AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
__asan_report_error(pc, bp, sp, __bad, isWrite, __size); \ bool suppressed = false; \
if (_ctx) { \
suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
if (!suppressed && HaveStackTraceBasedSuppressions()) { \
GET_STACK_TRACE_FATAL_HERE; \
suppressed = IsStackTraceSuppressed(&stack); \
} \
} \
if (!suppressed) { \
GET_CURRENT_PC_BP_SP; \
__asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \
} \
} \ } \
} while (0) } while (0)
#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false) #define ASAN_READ_RANGE(ctx, offset, size) \
#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true) ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, true)
#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \
ASAN_READ_RANGE((ctx), (s), \
common_flags()->strict_string_checks ? (len) + 1 : (n))
#define ASAN_READ_STRING(ctx, s, n) \
ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
// Behavior of functions like "memcpy" or "strcpy" is undefined // Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report error in this case. // if memory intervals overlap. We report error in this case.
@ -74,7 +109,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) { static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if ASAN_INTERCEPT_STRNLEN #if ASAN_INTERCEPT_STRNLEN
if (REAL(strnlen) != 0) { if (REAL(strnlen)) {
return REAL(strnlen)(s, maxlen); return REAL(strnlen)(s, maxlen);
} }
#endif #endif
@ -92,7 +127,7 @@ int OnExit() {
return 0; return 0;
} }
} // namespace __asan } // namespace __asan
// ---------------------- Wrappers ---------------- {{{1 // ---------------------- Wrappers ---------------- {{{1
using namespace __asan; // NOLINT using namespace __asan; // NOLINT
@ -100,31 +135,28 @@ using namespace __asan; // NOLINT
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr) DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#if !SANITIZER_MAC #define ASAN_INTERCEPTOR_ENTER(ctx, func) \
#define ASAN_INTERCEPT_FUNC(name) \ AsanInterceptorContext _ctx = {#func}; \
do { \ ctx = (void *)&_ctx; \
if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \ (void) ctx; \
VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \
} while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name) #define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ptr, size) ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size) #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
ASAN_READ_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
ASAN_INTERCEPTOR_ENTER(ctx, func); \
do { \ do { \
if (asan_init_is_running) \ if (asan_init_is_running) \
return REAL(func)(__VA_ARGS__); \ return REAL(func)(__VA_ARGS__); \
ctx = 0; \
(void) ctx; \
if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \ if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
return REAL(func)(__VA_ARGS__); \ return REAL(func)(__VA_ARGS__); \
ENSURE_ASAN_INITED(); \ ENSURE_ASAN_INITED(); \
} while (false) } while (false)
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
do { \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \ do { \
} while (false) } while (false)
@ -143,14 +175,30 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
do { \ do { \
} while (false) } while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name) #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
// Strict init-order checking is dlopen-hostile:
// https://code.google.com/p/address-sanitizer/issues/detail?id=178
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
if (flags()->strict_init_order) { \
StopInitOrderChecking(); \
}
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() #define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping() #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping() CoverageUpdateMapping()
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping()
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited) #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
if (AsanThread *t = GetCurrentThread()) { \
*begin = t->tls_begin(); \
*end = t->tls_end(); \
} else { \
*begin = *end = 0; \
}
#include "sanitizer_common/sanitizer_common_interceptors.inc" #include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s) // Syscall interceptors don't have contexts, we don't support suppressions
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s) // for them.
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
do { \ do { \
(void)(p); \ (void)(p); \
@ -163,56 +211,81 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} while (false) } while (false)
#include "sanitizer_common/sanitizer_common_syscalls.inc" #include "sanitizer_common/sanitizer_common_syscalls.inc"
struct ThreadStartParam {
atomic_uintptr_t t;
atomic_uintptr_t is_registered;
};
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread*)arg; ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
AsanThread *t = nullptr;
while ((t = reinterpret_cast<AsanThread *>(
atomic_load(&param->t, memory_order_acquire))) == nullptr)
internal_sched_yield();
SetCurrentThread(t); SetCurrentThread(t);
return t->ThreadStart(GetTid()); return t->ThreadStart(GetTid(), &param->is_registered);
} }
#if ASAN_INTERCEPT_PTHREAD_CREATE #if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread, INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) { void *attr, void *(*start_routine)(void*), void *arg) {
EnsureMainThreadIDIsCorrect(); EnsureMainThreadIDIsCorrect();
// Strict init-order checking in thread-hostile. // Strict init-order checking is thread-hostile.
if (flags()->strict_init_order) if (flags()->strict_init_order)
StopInitOrderChecking(); StopInitOrderChecking();
GET_STACK_TRACE_THREAD; GET_STACK_TRACE_THREAD;
int detached = 0; int detached = 0;
if (attr != 0) if (attr)
REAL(pthread_attr_getdetachstate)(attr, &detached); REAL(pthread_attr_getdetachstate)(attr, &detached);
ThreadStartParam param;
u32 current_tid = GetCurrentTidOrInvalid(); atomic_store(&param.t, 0, memory_order_relaxed);
AsanThread *t = AsanThread::Create(start_routine, arg); atomic_store(&param.is_registered, 0, memory_order_relaxed);
CreateThreadContextArgs args = { t, &stack }; int result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args); if (result == 0) {
return REAL(pthread_create)(thread, attr, asan_thread_start, t); u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t =
AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
// Wait until the AsanThread object is initialized and the ThreadRegistry
// entry is in "started" state. One reason for this is that after this
// interceptor exits, the child thread's stack may be the only thing holding
// the |arg| pointer. This may cause LSan to report a leak if leak checking
// happens at a point when the interceptor has already exited, but the stack
// range for the child thread is not yet known.
while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
internal_sched_yield();
}
return result;
} }
INTERCEPTOR(int, pthread_join, void *t, void **arg) {
return real_pthread_join(t, arg);
}
DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE #endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION #if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if SANITIZER_ANDROID #if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) { INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
if (!AsanInterceptsSignal(signum) || if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
common_flags()->allow_user_segv_handler) {
return REAL(bsd_signal)(signum, handler); return REAL(bsd_signal)(signum, handler);
} }
return 0; return 0;
} }
#else
INTERCEPTOR(void*, signal, int signum, void *handler) {
if (!AsanInterceptsSignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return 0;
}
#endif #endif
INTERCEPTOR(void*, signal, int signum, void *handler) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return nullptr;
}
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act, INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) { struct sigaction *oldact) {
if (!AsanInterceptsSignal(signum) || if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact); return REAL(sigaction)(signum, act, oldact);
} }
return 0; return 0;
@ -220,10 +293,10 @@ INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
namespace __sanitizer { namespace __sanitizer {
int real_sigaction(int signum, const void *act, void *oldact) { int real_sigaction(int signum, const void *act, void *oldact) {
return REAL(sigaction)(signum, return REAL(sigaction)(signum, (const struct sigaction *)act,
(struct sigaction *)act, (struct sigaction *)oldact); (struct sigaction *)oldact);
} }
} // namespace __sanitizer } // namespace __sanitizer
#elif SANITIZER_POSIX #elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on posix systems. // We need to have defined REAL(sigaction) on posix systems.
@ -239,7 +312,7 @@ static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
ssize += stack - bottom; ssize += stack - bottom;
ssize = RoundUpTo(ssize, PageSize); ssize = RoundUpTo(ssize, PageSize);
static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb
if (ssize && ssize <= kMaxSaneContextStackSize) { if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) {
PoisonShadow(bottom, ssize, 0); PoisonShadow(bottom, ssize, 0);
} }
} }
@ -294,113 +367,73 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
} }
#endif #endif
#if SANITIZER_WINDOWS // memcpy is called during __asan_init() from the internals of printf(...).
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) { // We do not treat memcpy with to==from as a bug.
CHECK(REAL(RaiseException)); // See http://llvm.org/bugs/show_bug.cgi?id=11763.
__asan_handle_no_return(); #define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \
REAL(RaiseException)(a, b, c, d); if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
} if (asan_init_is_running) { \
return REAL(memcpy)(to, from, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
if (to != from) { \
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
} \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return REAL(memcpy)(to, from, size); \
} while (0)
INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
CHECK(REAL(_except_handler3));
__asan_handle_no_return();
return REAL(_except_handler3)(a, b, c, d);
}
#if ASAN_DYNAMIC
// This handler is named differently in -MT and -MD CRTs.
#define _except_handler4 _except_handler4_common
#endif
INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
CHECK(REAL(_except_handler4));
__asan_handle_no_return();
return REAL(_except_handler4)(a, b, c, d);
}
#endif
static inline int CharCmp(unsigned char c1, unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size);
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
if (flags()->strict_memcmp) {
// Check the entire regions even if the first bytes of the buffers are
// different.
ASAN_READ_RANGE(a1, size);
ASAN_READ_RANGE(a2, size);
// Fallthrough to REAL(memcmp) below.
} else {
unsigned char c1 = 0, c2 = 0;
const unsigned char *s1 = (const unsigned char*)a1;
const unsigned char *s2 = (const unsigned char*)a2;
uptr i;
for (i = 0; i < size; i++) {
c1 = s1[i];
c2 = s2[i];
if (c1 != c2) break;
}
ASAN_READ_RANGE(s1, Min(i + 1, size));
ASAN_READ_RANGE(s2, Min(i + 1, size));
return CharCmp(c1, c2);
}
}
return REAL(memcmp(a1, a2, size));
}
void *__asan_memcpy(void *to, const void *from, uptr size) { void *__asan_memcpy(void *to, const void *from, uptr size) {
if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); ASAN_MEMCPY_IMPL(nullptr, to, from, size);
// memcpy is called during __asan_init() from the internals
// of printf(...).
if (asan_init_is_running) {
return REAL(memcpy)(to, from, size);
}
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
if (to != from) {
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
}
ASAN_READ_RANGE(from, size);
ASAN_WRITE_RANGE(to, size);
}
return REAL(memcpy)(to, from, size);
} }
// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \
if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
if (asan_init_is_running) { \
return REAL(memset)(block, c, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_WRITE_RANGE(ctx, block, size); \
} \
return REAL(memset)(block, c, size); \
} while (0)
void *__asan_memset(void *block, int c, uptr size) { void *__asan_memset(void *block, int c, uptr size) {
if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); ASAN_MEMSET_IMPL(nullptr, block, c, size);
// memset is called inside Printf.
if (asan_init_is_running) {
return REAL(memset)(block, c, size);
}
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
ASAN_WRITE_RANGE(block, size);
}
return REAL(memset)(block, c, size);
} }
#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \
if (UNLIKELY(!asan_inited)) \
return internal_memmove(to, from, size); \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return internal_memmove(to, from, size); \
} while (0)
void *__asan_memmove(void *to, const void *from, uptr size) { void *__asan_memmove(void *to, const void *from, uptr size) {
if (UNLIKELY(!asan_inited)) ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
return internal_memmove(to, from, size);
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
ASAN_READ_RANGE(from, size);
ASAN_WRITE_RANGE(to, size);
}
return internal_memmove(to, from, size);
} }
INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) { INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
return __asan_memmove(to, from, size); void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, memmove);
ASAN_MEMMOVE_IMPL(ctx, to, from, size);
} }
INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) { INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
#if !SANITIZER_MAC #if !SANITIZER_MAC
return __asan_memcpy(to, from, size); ASAN_MEMCPY_IMPL(ctx, to, from, size);
#else #else
// At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
// with WRAP(memcpy). As a result, false positives are reported for memmove() // with WRAP(memcpy). As a result, false positives are reported for memmove()
@ -408,15 +441,19 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
// ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
// internal_memcpy(), which may lead to crashes, see // internal_memcpy(), which may lead to crashes, see
// http://llvm.org/bugs/show_bug.cgi?id=16362. // http://llvm.org/bugs/show_bug.cgi?id=16362.
return __asan_memmove(to, from, size); ASAN_MEMMOVE_IMPL(ctx, to, from, size);
#endif // !SANITIZER_MAC #endif // !SANITIZER_MAC
} }
INTERCEPTOR(void*, memset, void *block, int c, uptr size) { INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
return __asan_memset(block, c, size); void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, memset);
ASAN_MEMSET_IMPL(ctx, block, c, size);
} }
INTERCEPTOR(char*, strchr, const char *str, int c) { INTERCEPTOR(char*, strchr, const char *str, int c) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strchr);
if (UNLIKELY(!asan_inited)) return internal_strchr(str, c); if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
// strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is // strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
// used. // used.
@ -426,8 +463,9 @@ INTERCEPTOR(char*, strchr, const char *str, int c) {
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
char *result = REAL(strchr)(str, c); char *result = REAL(strchr)(str, c);
if (flags()->replace_str) { if (flags()->replace_str) {
uptr bytes_read = (result ? result - str : REAL(strlen)(str)) + 1; uptr len = REAL(strlen)(str);
ASAN_READ_RANGE(str, bytes_read); uptr bytes_read = (result ? result - str : len) + 1;
ASAN_READ_STRING_OF_LEN(ctx, str, len, bytes_read);
} }
return result; return result;
} }
@ -449,13 +487,15 @@ DEFINE_REAL(char*, index, const char *string, int c)
// For both strcat() and strncat() we need to check the validity of |to| // For both strcat() and strncat() we need to check the validity of |to|
// argument irrespective of the |from| length. // argument irrespective of the |from| length.
INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
if (flags()->replace_str) { if (flags()->replace_str) {
uptr from_length = REAL(strlen)(from); uptr from_length = REAL(strlen)(from);
ASAN_READ_RANGE(from, from_length + 1); ASAN_READ_RANGE(ctx, from, from_length + 1);
uptr to_length = REAL(strlen)(to); uptr to_length = REAL(strlen)(to);
ASAN_READ_RANGE(to, to_length); ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
ASAN_WRITE_RANGE(to + to_length, from_length + 1); ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
// If the copying actually happens, the |from| string should not overlap // If the copying actually happens, the |from| string should not overlap
// with the resulting string starting at |to|, which has a length of // with the resulting string starting at |to|, which has a length of
// to_length + from_length + 1. // to_length + from_length + 1.
@ -468,14 +508,16 @@ INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT
} }
INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) { INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strncat);
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
if (flags()->replace_str) { if (flags()->replace_str) {
uptr from_length = MaybeRealStrnlen(from, size); uptr from_length = MaybeRealStrnlen(from, size);
uptr copy_length = Min(size, from_length + 1); uptr copy_length = Min(size, from_length + 1);
ASAN_READ_RANGE(from, copy_length); ASAN_READ_RANGE(ctx, from, copy_length);
uptr to_length = REAL(strlen)(to); uptr to_length = REAL(strlen)(to);
ASAN_READ_RANGE(to, to_length); ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
ASAN_WRITE_RANGE(to + to_length, from_length + 1); ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
if (from_length > 0) { if (from_length > 0) {
CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1, CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1,
from, copy_length); from, copy_length);
@ -485,6 +527,8 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
} }
INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT
#if SANITIZER_MAC #if SANITIZER_MAC
if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT
#endif #endif
@ -497,19 +541,21 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
if (flags()->replace_str) { if (flags()->replace_str) {
uptr from_size = REAL(strlen)(from) + 1; uptr from_size = REAL(strlen)(from) + 1;
CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size); CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
ASAN_READ_RANGE(from, from_size); ASAN_READ_RANGE(ctx, from, from_size);
ASAN_WRITE_RANGE(to, from_size); ASAN_WRITE_RANGE(ctx, to, from_size);
} }
return REAL(strcpy)(to, from); // NOLINT return REAL(strcpy)(to, from); // NOLINT
} }
#if ASAN_INTERCEPT_STRDUP #if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) { INTERCEPTOR(char*, strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
if (UNLIKELY(!asan_inited)) return internal_strdup(s); if (UNLIKELY(!asan_inited)) return internal_strdup(s);
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
uptr length = REAL(strlen)(s); uptr length = REAL(strlen)(s);
if (flags()->replace_str) { if (flags()->replace_str) {
ASAN_READ_RANGE(s, length + 1); ASAN_READ_RANGE(ctx, s, length + 1);
} }
GET_STACK_TRACE_MALLOC; GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack); void *new_mem = asan_malloc(length + 1, &stack);
@ -519,6 +565,8 @@ INTERCEPTOR(char*, strdup, const char *s) {
#endif #endif
INTERCEPTOR(SIZE_T, strlen, const char *s) { INTERCEPTOR(SIZE_T, strlen, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strlen);
if (UNLIKELY(!asan_inited)) return internal_strlen(s); if (UNLIKELY(!asan_inited)) return internal_strlen(s);
// strlen is called from malloc_default_purgeable_zone() // strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac. // in __asan::ReplaceSystemAlloc() on Mac.
@ -528,78 +576,65 @@ INTERCEPTOR(SIZE_T, strlen, const char *s) {
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
SIZE_T length = REAL(strlen)(s); SIZE_T length = REAL(strlen)(s);
if (flags()->replace_str) { if (flags()->replace_str) {
ASAN_READ_RANGE(s, length + 1); ASAN_READ_RANGE(ctx, s, length + 1);
} }
return length; return length;
} }
INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) { INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
SIZE_T length = REAL(wcslen)(s); SIZE_T length = REAL(wcslen)(s);
if (!asan_init_is_running) { if (!asan_init_is_running) {
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t)); ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
} }
return length; return length;
} }
INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) { INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
if (flags()->replace_str) { if (flags()->replace_str) {
uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1); uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size); CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
ASAN_READ_RANGE(from, from_size); ASAN_READ_RANGE(ctx, from, from_size);
ASAN_WRITE_RANGE(to, size); ASAN_WRITE_RANGE(ctx, to, size);
} }
return REAL(strncpy)(to, from, size); return REAL(strncpy)(to, from, size);
} }
#if ASAN_INTERCEPT_STRNLEN #if ASAN_INTERCEPT_STRNLEN
INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) { INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
uptr length = REAL(strnlen)(s, maxlen); uptr length = REAL(strnlen)(s, maxlen);
if (flags()->replace_str) { if (flags()->replace_str) {
ASAN_READ_RANGE(s, Min(length + 1, maxlen)); ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
} }
return length; return length;
} }
#endif // ASAN_INTERCEPT_STRNLEN #endif // ASAN_INTERCEPT_STRNLEN
static inline bool IsValidStrtolBase(int base) {
return (base == 0) || (2 <= base && base <= 36);
}
static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {
CHECK(endptr);
if (nptr == *endptr) {
// No digits were found at strtol call, we need to find out the last
// symbol accessed by strtoll on our own.
// We get this symbol by skipping leading blanks and optional +/- sign.
while (IsSpace(*nptr)) nptr++;
if (*nptr == '+' || *nptr == '-') nptr++;
*endptr = (char*)nptr;
}
CHECK(*endptr >= nptr);
}
INTERCEPTOR(long, strtol, const char *nptr, // NOLINT INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
char **endptr, int base) { char **endptr, int base) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strtol);
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
if (!flags()->replace_str) { if (!flags()->replace_str) {
return REAL(strtol)(nptr, endptr, base); return REAL(strtol)(nptr, endptr, base);
} }
char *real_endptr; char *real_endptr;
long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT
if (endptr != 0) { StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
*endptr = real_endptr;
}
if (IsValidStrtolBase(base)) {
FixRealStrtolEndptr(nptr, &real_endptr);
ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
}
return result; return result;
} }
INTERCEPTOR(int, atoi, const char *nptr) { INTERCEPTOR(int, atoi, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoi);
#if SANITIZER_MAC #if SANITIZER_MAC
if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr); if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
#endif #endif
@ -614,11 +649,13 @@ INTERCEPTOR(int, atoi, const char *nptr) {
// different from int). So, we just imitate this behavior. // different from int). So, we just imitate this behavior.
int result = REAL(strtol)(nptr, &real_endptr, 10); int result = REAL(strtol)(nptr, &real_endptr, 10);
FixRealStrtolEndptr(nptr, &real_endptr); FixRealStrtolEndptr(nptr, &real_endptr);
ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
return result; return result;
} }
INTERCEPTOR(long, atol, const char *nptr) { // NOLINT INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atol);
#if SANITIZER_MAC #if SANITIZER_MAC
if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr); if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
#endif #endif
@ -629,33 +666,28 @@ INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
char *real_endptr; char *real_endptr;
long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT
FixRealStrtolEndptr(nptr, &real_endptr); FixRealStrtolEndptr(nptr, &real_endptr);
ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
return result; return result;
} }
#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL #if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT
char **endptr, int base) { char **endptr, int base) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
if (!flags()->replace_str) { if (!flags()->replace_str) {
return REAL(strtoll)(nptr, endptr, base); return REAL(strtoll)(nptr, endptr, base);
} }
char *real_endptr; char *real_endptr;
long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT
if (endptr != 0) { StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
*endptr = real_endptr;
}
// If base has unsupported value, strtoll can exit with EINVAL
// without reading any characters. So do additional checks only
// if base is valid.
if (IsValidStrtolBase(base)) {
FixRealStrtolEndptr(nptr, &real_endptr);
ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
}
return result; return result;
} }
INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoll);
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
if (!flags()->replace_str) { if (!flags()->replace_str) {
return REAL(atoll)(nptr); return REAL(atoll)(nptr);
@ -663,7 +695,7 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
char *real_endptr; char *real_endptr;
long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT
FixRealStrtolEndptr(nptr, &real_endptr); FixRealStrtolEndptr(nptr, &real_endptr);
ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
return result; return result;
} }
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL #endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
@ -681,7 +713,7 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
#endif #endif
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
int res = REAL(__cxa_atexit)(func, arg, dso_handle); int res = REAL(__cxa_atexit)(func, arg, dso_handle);
REAL(__cxa_atexit)(AtCxaAtexit, 0, 0); REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr);
return res; return res;
} }
#endif // ASAN_INTERCEPT___CXA_ATEXIT #endif // ASAN_INTERCEPT___CXA_ATEXIT
@ -696,35 +728,6 @@ INTERCEPTOR(int, fork, void) {
} }
#endif // ASAN_INTERCEPT_FORK #endif // ASAN_INTERCEPT_FORK
#if SANITIZER_WINDOWS
INTERCEPTOR_WINAPI(DWORD, CreateThread,
void* security, uptr stack_size,
DWORD (__stdcall *start_routine)(void*), void* arg,
DWORD thr_flags, void* tid) {
// Strict init-order checking in thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(start_routine, arg);
CreateThreadContextArgs args = { t, &stack };
bool detached = false; // FIXME: how can we determine it on Windows?
asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
return REAL(CreateThread)(security, stack_size,
asan_thread_start, t, thr_flags, tid);
}
namespace __asan {
void InitializeWindowsInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
ASAN_INTERCEPT_FUNC(RaiseException);
ASAN_INTERCEPT_FUNC(_except_handler3);
ASAN_INTERCEPT_FUNC(_except_handler4);
}
} // namespace __asan
#endif
// ---------------------- InitializeAsanInterceptors ---------------- {{{1 // ---------------------- InitializeAsanInterceptors ---------------- {{{1
namespace __asan { namespace __asan {
void InitializeAsanInterceptors() { void InitializeAsanInterceptors() {
@ -734,7 +737,6 @@ void InitializeAsanInterceptors() {
InitializeCommonInterceptors(); InitializeCommonInterceptors();
// Intercept mem* functions. // Intercept mem* functions.
ASAN_INTERCEPT_FUNC(memcmp);
ASAN_INTERCEPT_FUNC(memmove); ASAN_INTERCEPT_FUNC(memmove);
ASAN_INTERCEPT_FUNC(memset); ASAN_INTERCEPT_FUNC(memset);
if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
@ -773,9 +775,8 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(sigaction); ASAN_INTERCEPT_FUNC(sigaction);
#if SANITIZER_ANDROID #if SANITIZER_ANDROID
ASAN_INTERCEPT_FUNC(bsd_signal); ASAN_INTERCEPT_FUNC(bsd_signal);
#else
ASAN_INTERCEPT_FUNC(signal);
#endif #endif
ASAN_INTERCEPT_FUNC(signal);
#endif #endif
#if ASAN_INTERCEPT_SWAPCONTEXT #if ASAN_INTERCEPT_SWAPCONTEXT
ASAN_INTERCEPT_FUNC(swapcontext); ASAN_INTERCEPT_FUNC(swapcontext);
@ -794,8 +795,13 @@ void InitializeAsanInterceptors() {
// Intercept threading-related functions // Intercept threading-related functions
#if ASAN_INTERCEPT_PTHREAD_CREATE #if ASAN_INTERCEPT_PTHREAD_CREATE
#if defined(ASAN_PTHREAD_CREATE_VERSION)
ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION);
#else
ASAN_INTERCEPT_FUNC(pthread_create); ASAN_INTERCEPT_FUNC(pthread_create);
#endif #endif
ASAN_INTERCEPT_FUNC(pthread_join);
#endif
// Intercept atexit function. // Intercept atexit function.
#if ASAN_INTERCEPT___CXA_ATEXIT #if ASAN_INTERCEPT___CXA_ATEXIT
@ -806,12 +812,9 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(fork); ASAN_INTERCEPT_FUNC(fork);
#endif #endif
// Some Windows-specific interceptors. InitializePlatformInterceptors();
#if SANITIZER_WINDOWS
InitializeWindowsInterceptors();
#endif
VReport(1, "AddressSanitizer: libc interceptors initialized\n"); VReport(1, "AddressSanitizer: libc interceptors initialized\n");
} }
} // namespace __asan } // namespace __asan

View File

@ -13,7 +13,7 @@
#define ASAN_INTERCEPTORS_H #define ASAN_INTERCEPTORS_H
#include "asan_internal.h" #include "asan_internal.h"
#include "sanitizer_common/sanitizer_interception.h" #include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h" #include "sanitizer_common/sanitizer_platform_interceptors.h"
// Use macro to describe if specific function should be // Use macro to describe if specific function should be
@ -90,9 +90,27 @@ struct sigaction;
DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act, DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) struct sigaction *oldact)
#if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name) \
do { \
if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \
VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \
} while (0)
#define ASAN_INTERCEPT_FUNC_VER(name, ver) \
do { \
if ((!INTERCEPT_FUNCTION_VER(name, ver) || !REAL(name))) \
VReport( \
1, "AddressSanitizer: failed to intercept '" #name "@@" #ver "'\n"); \
} while (0)
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC
namespace __asan { namespace __asan {
void InitializeAsanInterceptors(); void InitializeAsanInterceptors();
void InitializePlatformInterceptors();
#define ENSURE_ASAN_INITED() do { \ #define ENSURE_ASAN_INITED() do { \
CHECK(!asan_init_is_running); \ CHECK(!asan_init_is_running); \

View File

@ -7,8 +7,11 @@
// //
// This file is a part of AddressSanitizer, an address sanity checker. // This file is a part of AddressSanitizer, an address sanity checker.
// //
// This header can be included by the instrumented program to fetch // This header declares the AddressSanitizer runtime interface functions.
// data (mostly allocator statistics) from ASan runtime library. // The runtime library has to define these functions so the instrumented program
// could call them.
//
// See also include/sanitizer/asan_interface.h
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifndef ASAN_INTERFACE_INTERNAL_H #ifndef ASAN_INTERFACE_INTERNAL_H
#define ASAN_INTERFACE_INTERNAL_H #define ASAN_INTERFACE_INTERNAL_H
@ -22,10 +25,14 @@ using __sanitizer::uptr;
extern "C" { extern "C" {
// This function should be called at the very beginning of the process, // This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc. // before any instrumented code is executed and before any call to malloc.
// Please note that __asan_init is a macro that is replaced with
// __asan_init_vXXX at compile-time.
SANITIZER_INTERFACE_ATTRIBUTE void __asan_init(); SANITIZER_INTERFACE_ATTRIBUTE void __asan_init();
// This function exists purely to get a linker/loader error when using
// incompatible versions of instrumentation and runtime library. Please note
// that __asan_version_mismatch_check is a macro that is replaced with
// __asan_version_mismatch_check_vXXX at compile-time.
SANITIZER_INTERFACE_ATTRIBUTE void __asan_version_mismatch_check();
// This structure is used to describe the source location of a place where // This structure is used to describe the source location of a place where
// global was defined. // global was defined.
struct __asan_global_source_location { struct __asan_global_source_location {
@ -123,10 +130,8 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_report_error(uptr pc, uptr bp, uptr sp, void __asan_report_error(uptr pc, uptr bp, uptr sp,
uptr addr, int is_write, uptr access_size); uptr addr, int is_write, uptr access_size, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE
int __asan_set_error_exit_code(int exit_code);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_death_callback(void (*callback)(void)); void __asan_set_death_callback(void (*callback)(void));
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
@ -160,6 +165,21 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load1(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load2(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load4(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load8(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load16(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store1(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store2(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store4(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store8(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store16(uptr p, u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr p, uptr size,
u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr p, uptr size,
u32 exp);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_memcpy(void *dst, const void *src, uptr size); void* __asan_memcpy(void *dst, const void *src, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
@ -175,6 +195,10 @@ extern "C" {
void __asan_poison_intra_object_redzone(uptr p, uptr size); void __asan_poison_intra_object_redzone(uptr p, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr p, uptr size); void __asan_unpoison_intra_object_redzone(uptr p, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom);
} // extern "C" } // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H #endif // ASAN_INTERFACE_INTERNAL_H

View File

@ -19,8 +19,6 @@
#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#define ASAN_DEFAULT_FAILURE_EXITCODE 1
#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) #if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
# error "The AddressSanitizer run-time should not be" # error "The AddressSanitizer run-time should not be"
" instrumented by AddressSanitizer" " instrumented by AddressSanitizer"
@ -73,13 +71,11 @@ void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs(); void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT(); void AsanCheckIncompatibleRT();
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp); void AsanOnDeadlySignal(int, void *siginfo, void *context);
void AsanOnSIGSEGV(int, void *siginfo, void *context);
void DisableReexec();
void MaybeReexec(); void MaybeReexec();
bool AsanInterceptsSignal(int signum);
void ReadContextStack(void *context, uptr *stack, uptr *ssize); void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking(); void StopInitOrderChecking();
// Wrapper for TLS/TSD. // Wrapper for TLS/TSD.
@ -90,10 +86,10 @@ void PlatformTSDDtor(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer); void AppendToErrorMessageBuffer(const char *buffer);
void ParseExtraActivationFlags();
void *AsanDlSymNext(const char *sym); void *AsanDlSymNext(const char *sym);
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
// Platform-specific options. // Platform-specific options.
#if SANITIZER_MAC #if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove(); bool PlatformHasDifferentMemcpyAndMemmove();
@ -134,6 +130,8 @@ const int kAsanGlobalRedzoneMagic = 0xf9;
const int kAsanInternalHeapMagic = 0xfe; const int kAsanInternalHeapMagic = 0xfe;
const int kAsanArrayCookieMagic = 0xac; const int kAsanArrayCookieMagic = 0xac;
const int kAsanIntraObjectRedzone = 0xbb; const int kAsanIntraObjectRedzone = 0xbb;
const int kAsanAllocaLeftMagic = 0xca;
const int kAsanAllocaRightMagic = 0xcb;
static const uptr kCurrentStackFrameMagic = 0x41B58AB3; static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
static const uptr kRetiredStackFrameMagic = 0x45E0360E; static const uptr kRetiredStackFrameMagic = 0x45E0360E;

View File

@ -66,6 +66,12 @@ asan_rt_version_t __asan_rt_version;
namespace __asan { namespace __asan {
void InitializePlatformInterceptors() {}
void DisableReexec() {
// No need to re-exec on Linux.
}
void MaybeReexec() { void MaybeReexec() {
// No need to re-exec on Linux. // No need to re-exec on Linux.
} }
@ -105,8 +111,11 @@ static void ReportIncompatibleRT() {
} }
void AsanCheckDynamicRTPrereqs() { void AsanCheckDynamicRTPrereqs() {
if (!ASAN_DYNAMIC)
return;
// Ensure that dynamic RT is the first DSO in the list // Ensure that dynamic RT is the first DSO in the list
const char *first_dso_name = 0; const char *first_dso_name = nullptr;
dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name); dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
if (first_dso_name && !IsDynamicRTName(first_dso_name)) { if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
Report("ASan runtime does not come first in initial library list; " Report("ASan runtime does not come first in initial library list; "
@ -131,7 +140,8 @@ void AsanCheckIncompatibleRT() {
// system libraries, causing crashes later in ASan initialization. // system libraries, causing crashes later in ASan initialization.
MemoryMappingLayout proc_maps(/*cache_enabled*/true); MemoryMappingLayout proc_maps(/*cache_enabled*/true);
char filename[128]; char filename[128];
while (proc_maps.Next(0, 0, 0, filename, sizeof(filename), 0)) { while (proc_maps.Next(nullptr, nullptr, nullptr, filename,
sizeof(filename), nullptr)) {
if (IsDynamicRTName(filename)) { if (IsDynamicRTName(filename)) {
Report("Your application is linked against " Report("Your application is linked against "
"incompatible ASan runtimes.\n"); "incompatible ASan runtimes.\n");
@ -144,87 +154,7 @@ void AsanCheckIncompatibleRT() {
} }
} }
} }
#endif // SANITIZER_ANDROID #endif // SANITIZER_ANDROID
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.arm_pc;
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
#elif defined(__aarch64__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.regs[29];
*sp = ucontext->uc_mcontext.sp;
#elif defined(__hppa__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
/* GCC uses %r3 whenever a frame pointer is needed. */
*bp = ucontext->uc_mcontext.sc_gr[3];
*sp = ucontext->uc_mcontext.sc_gr[30];
#elif defined(__x86_64__)
# if SANITIZER_FREEBSD
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.mc_rip;
*bp = ucontext->uc_mcontext.mc_rbp;
*sp = ucontext->uc_mcontext.mc_rsp;
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
*bp = ucontext->uc_mcontext.gregs[REG_RBP];
*sp = ucontext->uc_mcontext.gregs[REG_RSP];
# endif
#elif defined(__i386__)
# if SANITIZER_FREEBSD
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.mc_eip;
*bp = ucontext->uc_mcontext.mc_ebp;
*sp = ucontext->uc_mcontext.mc_esp;
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_EIP];
*bp = ucontext->uc_mcontext.gregs[REG_EBP];
*sp = ucontext->uc_mcontext.gregs[REG_ESP];
# endif
#elif defined(__powerpc__) || defined(__powerpc64__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.regs->nip;
*sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
// The powerpc{,64}-linux ABIs do not specify r31 as the frame
// pointer, but GCC always uses r31 when we need a frame pointer.
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
#elif defined(__sparc__)
ucontext_t *ucontext = (ucontext_t*)context;
uptr *stk_ptr;
# if defined (__arch64__)
*pc = ucontext->uc_mcontext.mc_gregs[MC_PC];
*sp = ucontext->uc_mcontext.mc_gregs[MC_O6];
stk_ptr = (uptr *) (*sp + 2047);
*bp = stk_ptr[15];
# else
*pc = ucontext->uc_mcontext.gregs[REG_PC];
*sp = ucontext->uc_mcontext.gregs[REG_O6];
stk_ptr = (uptr *) *sp;
*bp = stk_ptr[15];
# endif
#elif defined(__mips__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[31];
*bp = ucontext->uc_mcontext.gregs[30];
*sp = ucontext->uc_mcontext.gregs[29];
#else
# error "Unsupported arch"
#endif
}
bool AsanInterceptsSignal(int signum) {
return signum == SIGSEGV && common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
// Nothing here for now.
}
#if !SANITIZER_ANDROID #if !SANITIZER_ANDROID
void ReadContextStack(void *context, uptr *stack, uptr *ssize) { void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
@ -242,6 +172,6 @@ void *AsanDlSymNext(const char *sym) {
return dlsym(RTLD_NEXT, sym); return dlsym(RTLD_NEXT, sym);
} }
} // namespace __asan } // namespace __asan
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX #endif // SANITIZER_FREEBSD || SANITIZER_LINUX

View File

@ -22,7 +22,14 @@
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mac.h" #include "sanitizer_common/sanitizer_mac.h"
#include <crt_externs.h> // for _NSGetArgv #if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
#else
extern "C" {
extern char ***_NSGetArgv(void);
}
#endif
#include <dlfcn.h> // for dladdr() #include <dlfcn.h> // for dladdr()
#include <mach-o/dyld.h> #include <mach-o/dyld.h>
#include <mach-o/loader.h> #include <mach-o/loader.h>
@ -38,19 +45,7 @@
namespace __asan { namespace __asan {
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { void InitializePlatformInterceptors() {}
ucontext_t *ucontext = (ucontext_t*)context;
# if SANITIZER_WORDSIZE == 64
*pc = ucontext->uc_mcontext->__ss.__rip;
*bp = ucontext->uc_mcontext->__ss.__rbp;
*sp = ucontext->uc_mcontext->__ss.__rsp;
# else
*pc = ucontext->uc_mcontext->__ss.__eip;
*bp = ucontext->uc_mcontext->__ss.__ebp;
*sp = ucontext->uc_mcontext->__ss.__esp;
# endif // SANITIZER_WORDSIZE
}
bool PlatformHasDifferentMemcpyAndMemmove() { bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved // On OS X 10.7 memcpy() and memmove() are both resolved
@ -72,35 +67,51 @@ LowLevelAllocator allocator_for_env;
// otherwise the corresponding "NAME=value" string is replaced with // otherwise the corresponding "NAME=value" string is replaced with
// |name_value|. // |name_value|.
void LeakyResetEnv(const char *name, const char *name_value) { void LeakyResetEnv(const char *name, const char *name_value) {
char ***env_ptr = _NSGetEnviron(); char **env = GetEnviron();
CHECK(env_ptr);
char **environ = *env_ptr;
CHECK(environ);
uptr name_len = internal_strlen(name); uptr name_len = internal_strlen(name);
while (*environ != 0) { while (*env != 0) {
uptr len = internal_strlen(*environ); uptr len = internal_strlen(*env);
if (len > name_len) { if (len > name_len) {
const char *p = *environ; const char *p = *env;
if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
// Match. // Match.
if (name_value) { if (name_value) {
// Replace the old value with the new one. // Replace the old value with the new one.
*environ = const_cast<char*>(name_value); *env = const_cast<char*>(name_value);
} else { } else {
// Shift the subsequent pointers back. // Shift the subsequent pointers back.
char **del = environ; char **del = env;
do { do {
del[0] = del[1]; del[0] = del[1];
} while (*del++); } while (*del++);
} }
} }
} }
environ++; env++;
} }
} }
static bool reexec_disabled = false;
void DisableReexec() {
reexec_disabled = true;
}
extern "C" double dyldVersionNumber;
static const double kMinDyldVersionWithAutoInterposition = 360.0;
bool DyldNeedsEnvVariable() {
// If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
// DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
// GetMacosVersion() doesn't work for the simulator. Let's instead check
// `dyldVersionNumber`, which is exported by dyld, against a known version
// number from the first OS release where this appeared.
return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
}
void MaybeReexec() { void MaybeReexec() {
if (!flags()->allow_reexec) return; if (reexec_disabled) return;
// Make sure the dynamic ASan runtime library is preloaded so that the // Make sure the dynamic ASan runtime library is preloaded so that the
// wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
// ourselves. // ourselves.
@ -111,8 +122,12 @@ void MaybeReexec() {
uptr old_env_len = dyld_insert_libraries ? uptr old_env_len = dyld_insert_libraries ?
internal_strlen(dyld_insert_libraries) : 0; internal_strlen(dyld_insert_libraries) : 0;
uptr fname_len = internal_strlen(info.dli_fname); uptr fname_len = internal_strlen(info.dli_fname);
if (!dyld_insert_libraries || const char *dylib_name = StripModuleName(info.dli_fname);
!REAL(strstr)(dyld_insert_libraries, info.dli_fname)) { uptr dylib_name_len = internal_strlen(dylib_name);
bool lib_is_in_env =
dyld_insert_libraries && REAL(strstr)(dyld_insert_libraries, dylib_name);
if (DyldNeedsEnvVariable() && !lib_is_in_env) {
// DYLD_INSERT_LIBRARIES is not set or does not contain the runtime // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
// library. // library.
char program_name[1024]; char program_name[1024];
@ -138,58 +153,77 @@ void MaybeReexec() {
VReport(1, "exec()-ing the program with\n"); VReport(1, "exec()-ing the program with\n");
VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env); VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
VReport(1, "to enable ASan wrappers.\n"); VReport(1, "to enable ASan wrappers.\n");
VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
execv(program_name, *_NSGetArgv()); execv(program_name, *_NSGetArgv());
} else {
// DYLD_INSERT_LIBRARIES is set and contains the runtime library.
if (old_env_len == fname_len) {
// It's just the runtime library name - fine to unset the variable.
LeakyResetEnv(kDyldInsertLibraries, NULL);
} else {
uptr env_name_len = internal_strlen(kDyldInsertLibraries);
// Allocate memory to hold the previous env var name, its value, the '='
// sign and the '\0' char.
char *new_env = (char*)allocator_for_env.Allocate(
old_env_len + 2 + env_name_len);
CHECK(new_env);
internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
new_env[env_name_len] = '=';
char *new_env_pos = new_env + env_name_len + 1;
// Iterate over colon-separated pieces of |dyld_insert_libraries|. // We get here only if execv() failed.
char *piece_start = dyld_insert_libraries; Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
char *piece_end = NULL; "which is required for ASan to work. ASan tried to set the "
char *old_env_end = dyld_insert_libraries + old_env_len; "environment variable and re-execute itself, but execv() failed, "
do { "possibly because of sandbox restrictions. Make sure to launch the "
if (piece_start[0] == ':') piece_start++; "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
piece_end = REAL(strchr)(piece_start, ':'); CHECK("execv failed" && 0);
if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
uptr piece_len = piece_end - piece_start;
// If the current piece isn't the runtime library name,
// append it to new_env.
if ((piece_len != fname_len) ||
(internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) {
if (new_env_pos != new_env + env_name_len + 1) {
new_env_pos[0] = ':';
new_env_pos++;
}
internal_strncpy(new_env_pos, piece_start, piece_len);
}
// Move on to the next piece.
new_env_pos += piece_len;
piece_start = piece_end;
} while (piece_start < old_env_end);
// Can't use setenv() here, because it requires the allocator to be
// initialized.
// FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
// a separate function called after InitializeAllocator().
LeakyResetEnv(kDyldInsertLibraries, new_env);
}
} }
if (!lib_is_in_env)
return;
// DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
// the dylib from the environment variable, because interceptors are installed
// and we don't want our children to inherit the variable.
uptr env_name_len = internal_strlen(kDyldInsertLibraries);
// Allocate memory to hold the previous env var name, its value, the '='
// sign and the '\0' char.
char *new_env = (char*)allocator_for_env.Allocate(
old_env_len + 2 + env_name_len);
CHECK(new_env);
internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
new_env[env_name_len] = '=';
char *new_env_pos = new_env + env_name_len + 1;
// Iterate over colon-separated pieces of |dyld_insert_libraries|.
char *piece_start = dyld_insert_libraries;
char *piece_end = NULL;
char *old_env_end = dyld_insert_libraries + old_env_len;
do {
if (piece_start[0] == ':') piece_start++;
piece_end = REAL(strchr)(piece_start, ':');
if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
uptr piece_len = piece_end - piece_start;
char *filename_start =
(char *)internal_memrchr(piece_start, '/', piece_len);
uptr filename_len = piece_len;
if (filename_start) {
filename_start += 1;
filename_len = piece_len - (filename_start - piece_start);
} else {
filename_start = piece_start;
}
// If the current piece isn't the runtime library name,
// append it to new_env.
if ((dylib_name_len != filename_len) ||
(internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
if (new_env_pos != new_env + env_name_len + 1) {
new_env_pos[0] = ':';
new_env_pos++;
}
internal_strncpy(new_env_pos, piece_start, piece_len);
new_env_pos += piece_len;
}
// Move on to the next piece.
piece_start = piece_end;
} while (piece_start < old_env_end);
// Can't use setenv() here, because it requires the allocator to be
// initialized.
// FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
// a separate function called after InitializeAllocator().
if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
LeakyResetEnv(kDyldInsertLibraries, new_env);
} }
// No-op. Mac does not support static linkage anyway. // No-op. Mac does not support static linkage anyway.
@ -203,14 +237,6 @@ void AsanCheckDynamicRTPrereqs() {}
// No-op. Mac does not support static linkage anyway. // No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {} void AsanCheckIncompatibleRT() {}
bool AsanInterceptsSignal(int signum) {
return (signum == SIGSEGV || signum == SIGBUS) &&
common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
}
void ReadContextStack(void *context, uptr *stack, uptr *ssize) { void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
@ -262,9 +288,8 @@ ALWAYS_INLINE
void asan_register_worker_thread(int parent_tid, StackTrace *stack) { void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
AsanThread *t = GetCurrentThread(); AsanThread *t = GetCurrentThread();
if (!t) { if (!t) {
t = AsanThread::Create(0, 0); t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
CreateThreadContextArgs args = { t, stack }; parent_tid, stack, /* detached */ true);
asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args);
t->Init(); t->Init();
asanThreadRegistry().StartThread(t->tid(), 0, 0); asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t); SetCurrentThread(t);
@ -311,7 +336,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
dispatch_function_t func) { \ dispatch_function_t func) { \
GET_STACK_TRACE_THREAD; \ GET_STACK_TRACE_THREAD; \
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \ asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
if (common_flags()->verbosity >= 2) { \ if (Verbosity() >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \ Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \ asan_ctxt, pthread_self()); \
PRINT_CURRENT_STACK(); \ PRINT_CURRENT_STACK(); \
@ -329,7 +354,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
dispatch_function_t func) { dispatch_function_t func) {
GET_STACK_TRACE_THREAD; GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (common_flags()->verbosity >= 2) { if (Verbosity() >= 2) {
Report("dispatch_after_f: %p\n", asan_ctxt); Report("dispatch_after_f: %p\n", asan_ctxt);
PRINT_CURRENT_STACK(); PRINT_CURRENT_STACK();
} }
@ -342,7 +367,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
dispatch_function_t func) { dispatch_function_t func) {
GET_STACK_TRACE_THREAD; GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (common_flags()->verbosity >= 2) { if (Verbosity() >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n", Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self()); asan_ctxt, pthread_self());
PRINT_CURRENT_STACK(); PRINT_CURRENT_STACK();
@ -372,13 +397,6 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
work(); \ work(); \
} }
// Forces the compiler to generate a frame pointer in the function.
#define ENABLE_FRAME_POINTER \
do { \
volatile uptr enable_fp; \
enable_fp = GET_CURRENT_FRAME(); \
} while (0)
INTERCEPTOR(void, dispatch_async, INTERCEPTOR(void, dispatch_async,
dispatch_queue_t dq, void(^work)(void)) { dispatch_queue_t dq, void(^work)(void)) {
ENABLE_FRAME_POINTER; ENABLE_FRAME_POINTER;
@ -402,6 +420,10 @@ INTERCEPTOR(void, dispatch_after,
INTERCEPTOR(void, dispatch_source_set_cancel_handler, INTERCEPTOR(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void(^work)(void)) { dispatch_source_t ds, void(^work)(void)) {
if (!work) {
REAL(dispatch_source_set_cancel_handler)(ds, work);
return;
}
ENABLE_FRAME_POINTER; ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work); GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_cancel_handler)(ds, asan_block); REAL(dispatch_source_set_cancel_handler)(ds, asan_block);

View File

@ -88,9 +88,9 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
ENSURE_ASAN_INITED(); ENSURE_ASAN_INITED();
// Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes. // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
size_t buflen = 6 + (name ? internal_strlen(name) : 0); size_t buflen = 6 + (name ? internal_strlen(name) : 0);
InternalScopedBuffer<char> new_name(buflen); InternalScopedString new_name(buflen);
if (name && zone->introspect == asan_zone.introspect) { if (name && zone->introspect == asan_zone.introspect) {
internal_snprintf(new_name.data(), buflen, "asan-%s", name); new_name.append("asan-%s", name);
name = new_name.data(); name = new_name.data();
} }
@ -150,13 +150,17 @@ INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
namespace { namespace {
// TODO(glider): the mz_* functions should be united with the Linux wrappers, // TODO(glider): the __asan_mz_* functions should be united with the Linux
// as they are basically copied from there. // wrappers, as they are basically copied from there.
size_t mz_size(malloc_zone_t* zone, const void* ptr) { extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
size_t __asan_mz_size(malloc_zone_t* zone, const void* ptr) {
return asan_mz_size(ptr); return asan_mz_size(ptr);
} }
void *mz_malloc(malloc_zone_t *zone, size_t size) { extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_malloc(malloc_zone_t *zone, uptr size) {
if (UNLIKELY(!asan_inited)) { if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone); CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size); return malloc_zone_malloc(system_malloc_zone, size);
@ -165,7 +169,9 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
return asan_malloc(size, &stack); return asan_malloc(size, &stack);
} }
void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) { extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
if (UNLIKELY(!asan_inited)) { if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024; const size_t kCallocPoolSize = 1024;
@ -181,7 +187,9 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
return asan_calloc(nmemb, size, &stack); return asan_calloc(nmemb, size, &stack);
} }
void *mz_valloc(malloc_zone_t *zone, size_t size) { extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_valloc(malloc_zone_t *zone, size_t size) {
if (UNLIKELY(!asan_inited)) { if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone); CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size); return malloc_zone_valloc(system_malloc_zone, size);
@ -208,11 +216,15 @@ void ALWAYS_INLINE free_common(void *context, void *ptr) {
} }
// TODO(glider): the allocation callbacks need to be refactored. // TODO(glider): the allocation callbacks need to be refactored.
void mz_free(malloc_zone_t *zone, void *ptr) { extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_mz_free(malloc_zone_t *zone, void *ptr) {
free_common(zone, ptr); free_common(zone, ptr);
} }
void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (!ptr) { if (!ptr) {
GET_STACK_TRACE_MALLOC; GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack); return asan_malloc(size, &stack);
@ -231,15 +243,16 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
} }
} }
void mz_destroy(malloc_zone_t* zone) { extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_mz_destroy(malloc_zone_t* zone) {
// A no-op -- we will not be destroyed! // A no-op -- we will not be destroyed!
Report("mz_destroy() called -- ignoring\n"); Report("__asan_mz_destroy() called -- ignoring\n");
} }
// from AvailabilityMacros.h extern "C"
#if defined(MAC_OS_X_VERSION_10_6) && \ SANITIZER_INTERFACE_ATTRIBUTE
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 void *__asan_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
if (UNLIKELY(!asan_inited)) { if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone); CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size); return malloc_zone_memalign(system_malloc_zone, align, size);
@ -250,12 +263,12 @@ void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
// This function is currently unused, and we build with -Werror. // This function is currently unused, and we build with -Werror.
#if 0 #if 0
void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) { void __asan_mz_free_definite_size(
malloc_zone_t* zone, void *ptr, size_t size) {
// TODO(glider): check that |size| is valid. // TODO(glider): check that |size| is valid.
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
#endif #endif
#endif
kern_return_t mi_enumerator(task_t task, void *, kern_return_t mi_enumerator(task_t task, void *,
unsigned type_mask, vm_address_t zone_address, unsigned type_mask, vm_address_t zone_address,
@ -297,13 +310,10 @@ void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t)); internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
} }
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
boolean_t mi_zone_locked(malloc_zone_t *zone) { boolean_t mi_zone_locked(malloc_zone_t *zone) {
// UNIMPLEMENTED(); // UNIMPLEMENTED();
return false; return false;
} }
#endif
} // unnamed namespace } // unnamed namespace
@ -322,32 +332,25 @@ void ReplaceSystemMalloc() {
asan_introspection.force_lock = &mi_force_lock; asan_introspection.force_lock = &mi_force_lock;
asan_introspection.force_unlock = &mi_force_unlock; asan_introspection.force_unlock = &mi_force_unlock;
asan_introspection.statistics = &mi_statistics; asan_introspection.statistics = &mi_statistics;
asan_introspection.zone_locked = &mi_zone_locked;
internal_memset(&asan_zone, 0, sizeof(malloc_zone_t)); internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));
// Start with a version 4 zone which is used for OS X 10.4 and 10.5. // Use version 6 for OSX >= 10.6.
asan_zone.version = 4; asan_zone.version = 6;
asan_zone.zone_name = "asan"; asan_zone.zone_name = "asan";
asan_zone.size = &mz_size; asan_zone.size = &__asan_mz_size;
asan_zone.malloc = &mz_malloc; asan_zone.malloc = &__asan_mz_malloc;
asan_zone.calloc = &mz_calloc; asan_zone.calloc = &__asan_mz_calloc;
asan_zone.valloc = &mz_valloc; asan_zone.valloc = &__asan_mz_valloc;
asan_zone.free = &mz_free; asan_zone.free = &__asan_mz_free;
asan_zone.realloc = &mz_realloc; asan_zone.realloc = &__asan_mz_realloc;
asan_zone.destroy = &mz_destroy; asan_zone.destroy = &__asan_mz_destroy;
asan_zone.batch_malloc = 0; asan_zone.batch_malloc = 0;
asan_zone.batch_free = 0; asan_zone.batch_free = 0;
asan_zone.introspect = &asan_introspection;
// from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
// Switch to version 6 on OSX 10.6 to support memalign.
asan_zone.version = 6;
asan_zone.free_definite_size = 0; asan_zone.free_definite_size = 0;
asan_zone.memalign = &mz_memalign; asan_zone.memalign = &__asan_mz_memalign;
asan_introspection.zone_locked = &mi_zone_locked; asan_zone.introspect = &asan_introspection;
#endif
// Register the ASan zone. // Register the ASan zone.
malloc_zone_register(&asan_zone); malloc_zone_register(&asan_zone);

View File

@ -17,7 +17,7 @@
#include "asan_interceptors.h" #include "asan_interceptors.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "sanitizer_common/sanitizer_interception.h" #include "interception/interception.h"
#include <stddef.h> #include <stddef.h>

View File

@ -57,13 +57,34 @@
// || `[0x20000000, 0x23ffffff]` || LowShadow || // || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem || // || `[0x00000000, 0x1fffffff]` || LowMem ||
// //
// Default Linux/MIPS mapping: // Default Linux/MIPS32 mapping:
// || `[0x2aaa0000, 0xffffffff]` || HighMem || // || `[0x2aaa0000, 0xffffffff]` || HighMem ||
// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow || // || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap || // || `[0x0bff4000, 0x0fff3fff]` || ShadowGap ||
// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow || // || `[0x0aaa0000, 0x0bff3fff]` || LowShadow ||
// || `[0x00000000, 0x0aa9ffff]` || LowMem || // || `[0x00000000, 0x0aa9ffff]` || LowMem ||
// //
// Default Linux/MIPS64 mapping:
// || `[0x4000000000, 0xffffffffff]` || HighMem ||
// || `[0x2800000000, 0x3fffffffff]` || HighShadow ||
// || `[0x2400000000, 0x27ffffffff]` || ShadowGap ||
// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
//
// Default Linux/AArch64 (39-bit VMA) mapping:
// || `[0x2000000000, 0x7fffffffff]` || highmem ||
// || `[0x1400000000, 0x1fffffffff]` || highshadow ||
// || `[0x1200000000, 0x13ffffffff]` || shadowgap ||
// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
//
// Default Linux/AArch64 (42-bit VMA) mapping:
// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
// || `[0x09000000000, 0x09fffffffff]` || shadowgap ||
// || `[0x08000000000, 0x08fffffffff]` || lowshadow ||
// || `[0x00000000000, 0x07fffffffff]` || lowmem ||
//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: // Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem || // || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow || // || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@ -77,36 +98,56 @@
// || `[0x48000000, 0x4bffffff]` || ShadowGap || // || `[0x48000000, 0x4bffffff]` || ShadowGap ||
// || `[0x40000000, 0x47ffffff]` || LowShadow || // || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem || // || `[0x00000000, 0x3fffffff]` || LowMem ||
//
// Default Windows/i386 mapping:
// (the exact location of HighShadow/HighMem may vary depending
// on WoW64, /LARGEADDRESSAWARE, etc).
// || `[0x50000000, 0xffffffff]` || HighMem ||
// || `[0x3a000000, 0x4fffffff]` || HighShadow ||
// || `[0x36000000, 0x39ffffff]` || ShadowGap ||
// || `[0x30000000, 0x35ffffff]` || LowShadow ||
// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3; static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44; static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G. static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kIosShadowOffset64 = 0x130000000;
static const u64 kIosSimShadowOffset32 = 1ULL << 30;
static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
#if SANITIZER_AARCH64_VMA == 39
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
#elif SANITIZER_AARCH64_VMA == 42
static const u64 kAArch64_ShadowOffset64 = 1ULL << 39;
#endif
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36; static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
#define SHADOW_SCALE kDefaultShadowScale #define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
#else #if SANITIZER_WORDSIZE == 32
# if SANITIZER_WORDSIZE == 32 # if SANITIZER_ANDROID
# if defined(__mips__) # define SHADOW_OFFSET (0)
# elif defined(__mips__)
# define SHADOW_OFFSET kMIPS32_ShadowOffset32 # define SHADOW_OFFSET kMIPS32_ShadowOffset32
# elif SANITIZER_FREEBSD # elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32 # define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# elif SANITIZER_WINDOWS
# define SHADOW_OFFSET kWindowsShadowOffset32
# elif SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset32
# elif SANITIZER_IOS
# define SHADOW_OFFSET kIosShadowOffset32
# else # else
# if SANITIZER_IOS # define SHADOW_OFFSET kDefaultShadowOffset32
# define SHADOW_OFFSET kIosShadowOffset32
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# endif # endif
# else #else
# if defined(__aarch64__) # if defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64 # define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif defined(__powerpc64__) # elif defined(__powerpc64__)
@ -117,10 +158,13 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
# define SHADOW_OFFSET kDefaultShadowOffset64 # define SHADOW_OFFSET kDefaultShadowOffset64
# elif defined(__mips64) # elif defined(__mips64)
# define SHADOW_OFFSET kMIPS64_ShadowOffset64 # define SHADOW_OFFSET kMIPS64_ShadowOffset64
# elif SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset64
# elif SANITIZER_IOS
# define SHADOW_OFFSET kIosShadowOffset64
# else # else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset # define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif # endif
# endif
#endif #endif
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) #define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
@ -143,7 +187,8 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
// With the zero shadow base we can not actually map pages starting from 0. // With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary. // This constant is somewhat arbitrary.
#define kZeroBaseShadowStart (1 << 18) #define kZeroBaseShadowStart 0
#define kZeroBaseMaxShadowStart (1 << 18)
#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \ #define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \
: kZeroBaseShadowStart) : kZeroBaseShadowStart)

View File

@ -14,7 +14,7 @@
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "sanitizer_common/sanitizer_interception.h" #include "interception/interception.h"
#include <stddef.h> #include <stddef.h>
@ -88,11 +88,11 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
#if !SANITIZER_MAC #if !SANITIZER_MAC
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr) throw() { void operator delete(void *ptr) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW); OPERATOR_DELETE_BODY(FROM_NEW);
} }
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr) throw() { void operator delete[](void *ptr) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW_BR); OPERATOR_DELETE_BODY(FROM_NEW_BR);
} }
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
@ -104,12 +104,12 @@ void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR); OPERATOR_DELETE_BODY(FROM_NEW_BR);
} }
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, size_t size) throw() { void operator delete(void *ptr, size_t size) NOEXCEPT {
GET_STACK_TRACE_FREE; GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW); asan_sized_free(ptr, size, &stack, FROM_NEW);
} }
CXX_OPERATOR_ATTRIBUTE CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size) throw() { void operator delete[](void *ptr, size_t size) NOEXCEPT {
GET_STACK_TRACE_FREE; GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW_BR); asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
} }

View File

@ -13,13 +13,24 @@
#include "asan_poisoning.h" #include "asan_poisoning.h"
#include "asan_report.h" #include "asan_report.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
namespace __asan { namespace __asan {
static atomic_uint8_t can_poison_memory;
void SetCanPoisonMemory(bool value) {
atomic_store(&can_poison_memory, value, memory_order_release);
}
bool CanPoisonMemory() {
return atomic_load(&can_poison_memory, memory_order_acquire);
}
void PoisonShadow(uptr addr, uptr size, u8 value) { void PoisonShadow(uptr addr, uptr size, u8 value) {
if (!flags()->poison_heap) return; if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr)); CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size)); CHECK(AddrIsAlignedByGranularity(addr + size));
@ -32,7 +43,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
uptr size, uptr size,
uptr redzone_size, uptr redzone_size,
u8 value) { u8 value) {
if (!flags()->poison_heap) return; if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr)); CHECK(AddrIsInMem(addr));
FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value); FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
@ -61,10 +72,10 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) { void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
uptr end = ptr + size; uptr end = ptr + size;
if (common_flags()->verbosity) { if (Verbosity()) {
Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n", Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
poison ? "" : "un", ptr, end, size); poison ? "" : "un", ptr, end, size);
if (common_flags()->verbosity >= 2) if (Verbosity() >= 2)
PRINT_CURRENT_STACK(); PRINT_CURRENT_STACK();
} }
CHECK(size); CHECK(size);
@ -99,7 +110,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return; if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr; uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size; uptr end_addr = beg_addr + size;
VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
(void *)end_addr); (void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr); ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr); ShadowSegmentEndpoint end(end_addr);
@ -139,7 +150,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
if (!flags()->allow_user_poisoning || size == 0) return; if (!flags()->allow_user_poisoning || size == 0) return;
uptr beg_addr = (uptr)addr; uptr beg_addr = (uptr)addr;
uptr end_addr = beg_addr + size; uptr end_addr = beg_addr + size;
VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr, VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
(void *)end_addr); (void *)end_addr);
ShadowSegmentEndpoint beg(beg_addr); ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr); ShadowSegmentEndpoint end(end_addr);
@ -205,7 +216,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
__asan::AddressIsPoisoned(__p + __size - 1))) { \ __asan::AddressIsPoisoned(__p + __size - 1))) { \
GET_CURRENT_PC_BP_SP; \ GET_CURRENT_PC_BP_SP; \
uptr __bad = __asan_region_is_poisoned(__p, __size); \ uptr __bad = __asan_region_is_poisoned(__p, __size); \
__asan_report_error(pc, bp, sp, __bad, isWrite, __size);\ __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\
} \ } \
} while (false); \ } while (false); \

View File

@ -17,6 +17,10 @@
namespace __asan { namespace __asan {
// Enable/disable memory poisoning.
void SetCanPoisonMemory(bool value);
bool CanPoisonMemory();
// Poisons the shadow memory for "size" bytes starting from "addr". // Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value); void PoisonShadow(uptr addr, uptr size, u8 value);
@ -32,7 +36,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
// performance-critical code with care. // performance-critical code with care.
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) { u8 value) {
DCHECK(flags()->poison_heap); DCHECK(CanPoisonMemory());
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg); uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW( uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
@ -58,15 +62,14 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
if (page_end != shadow_end) { if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end); REAL(memset)((void *)page_end, 0, shadow_end - page_end);
} }
void *res = MmapFixedNoReserve(page_beg, page_end - page_beg); ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
CHECK_EQ(page_beg, res);
} }
} }
} }
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone( ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
uptr aligned_addr, uptr size, uptr redzone_size, u8 value) { uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
DCHECK(flags()->poison_heap); DCHECK(CanPoisonMemory());
bool poison_partial = flags()->poison_partial; bool poison_partial = flags()->poison_partial;
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr); u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) { for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {

View File

@ -19,6 +19,7 @@
#include "asan_report.h" #include "asan_report.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_procmaps.h"
#include <pthread.h> #include <pthread.h>
@ -30,26 +31,52 @@
namespace __asan { namespace __asan {
void AsanOnSIGSEGV(int, void *siginfo, void *context) { void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread()); ScopedDeadlySignal signal_scope(GetCurrentThread());
uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
int code = (int)((siginfo_t*)siginfo)->si_code; int code = (int)((siginfo_t*)siginfo)->si_code;
// Write the first message using the bullet-proof write. // Write the first message using the bullet-proof write.
if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die(); if (18 != internal_write(2, "ASAN:DEADLYSIGNAL\n", 18)) Die();
uptr pc, sp, bp; SignalContext sig = SignalContext::Create(siginfo, context);
GetPcSpBp(context, &pc, &sp, &bp);
// Access at a reasonable offset above SP, or slightly below it (to account // Access at a reasonable offset above SP, or slightly below it (to account
// for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
// probably a stack overflow. // probably a stack overflow.
bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
#if __powerpc__
// Large stack frames can be allocated with e.g.
// lis r0,-10000
// stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
// If the store faults then sp will not have been updated, so test above
// will not work, becase the fault address will be more than just "slightly"
// below sp.
if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) {
u32 inst = *(unsigned *)sig.pc;
u32 ra = (inst >> 16) & 0x1F;
u32 opcd = inst >> 26;
u32 xo = (inst >> 1) & 0x3FF;
// Check for store-with-update to sp. The instructions we accept are:
// stbu rs,d(ra) stbux rs,ra,rb
// sthu rs,d(ra) sthux rs,ra,rb
// stwu rs,d(ra) stwux rs,ra,rb
// stdu rs,ds(ra) stdux rs,ra,rb
// where ra is r1 (the stack pointer).
if (ra == 1 &&
(opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
(opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
IsStackAccess = true;
}
#endif // __powerpc__
// We also check si_code to filter out SEGV caused by something else other // We also check si_code to filter out SEGV caused by something else other
// then hitting the guard page or unmapped memory, like, for example, // then hitting the guard page or unmapped memory, like, for example,
// unaligned memory access. // unaligned memory access.
if (addr + 512 > sp && addr < sp + 0xFFFF && if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
(code == si_SEGV_MAPERR || code == si_SEGV_ACCERR)) ReportStackOverflow(sig);
ReportStackOverflow(pc, sp, bp, context, addr); else if (signo == SIGFPE)
ReportDeadlySignal("FPE", sig);
else else
ReportSIGSEGV("SEGV", pc, sp, bp, context, addr); ReportDeadlySignal("SEGV", sig);
} }
// ---------------------- TSD ---------------- {{{1 // ---------------------- TSD ---------------- {{{1

View File

@ -11,9 +11,13 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "asan_internal.h" #include "asan_internal.h"
using namespace __asan;
#if SANITIZER_CAN_USE_PREINIT_ARRAY #if SANITIZER_CAN_USE_PREINIT_ARRAY
// The symbol is called __local_asan_preinit, because it's not intended to be // The symbol is called __local_asan_preinit, because it's not intended to be
// exported. // exported.
// This code linked into the main executable when -fsanitize=address is in
// the link flags. It can only use exported interface functions.
__attribute__((section(".preinit_array"), used)) __attribute__((section(".preinit_array"), used))
void (*__local_asan_preinit)(void) = __asan_init; void (*__local_asan_preinit)(void) = __asan_init;
#endif #endif

View File

@ -9,6 +9,7 @@
// //
// This file contains error reporting code. // This file contains error reporting code.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "asan_flags.h" #include "asan_flags.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_mapping.h" #include "asan_mapping.h"
@ -25,7 +26,7 @@ namespace __asan {
// -------------------- User-specified callbacks ----------------- {{{1 // -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*); static void (*error_report_callback)(const char*);
static char *error_message_buffer = 0; static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0; static uptr error_message_buffer_pos = 0;
static uptr error_message_buffer_size = 0; static uptr error_message_buffer_size = 0;
@ -51,7 +52,7 @@ void AppendToErrorMessageBuffer(const char *buffer) {
buffer, remaining); buffer, remaining);
error_message_buffer[error_message_buffer_size - 1] = '\0'; error_message_buffer[error_message_buffer_size - 1] = '\0';
// FIXME: reallocate the buffer instead of truncating the message. // FIXME: reallocate the buffer instead of truncating the message.
error_message_buffer_pos += remaining > length ? length : remaining; error_message_buffer_pos += Min(remaining, length);
} }
} }
@ -85,6 +86,8 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator {
return Cyan(); return Cyan();
case kAsanUserPoisonedMemoryMagic: case kAsanUserPoisonedMemoryMagic:
case kAsanContiguousContainerOOBMagic: case kAsanContiguousContainerOOBMagic:
case kAsanAllocaLeftMagic:
case kAsanAllocaRightMagic:
return Blue(); return Blue();
case kAsanStackUseAfterScopeMagic: case kAsanStackUseAfterScopeMagic:
return Magenta(); return Magenta();
@ -171,6 +174,8 @@ static void PrintLegend(InternalScopedString *str) {
PrintShadowByte(str, " Intra object redzone: ", PrintShadowByte(str, " Intra object redzone: ",
kAsanIntraObjectRedzone); kAsanIntraObjectRedzone);
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic); PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
} }
void MaybeDumpInstructionBytes(uptr pc) { void MaybeDumpInstructionBytes(uptr pc) {
@ -275,9 +280,8 @@ static void PrintGlobalLocation(InternalScopedString *str,
str->append(":%d", g.location->column_no); str->append(":%d", g.location->column_no);
} }
bool DescribeAddressRelativeToGlobal(uptr addr, uptr size, static void DescribeAddressRelativeToGlobal(uptr addr, uptr size,
const __asan_global &g) { const __asan_global &g) {
if (!IsAddressNearGlobal(addr, g)) return false;
InternalScopedString str(4096); InternalScopedString str(4096);
Decorator d; Decorator d;
str.append("%s", d.Location()); str.append("%s", d.Location());
@ -300,6 +304,26 @@ bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
str.append("%s", d.EndLocation()); str.append("%s", d.EndLocation());
PrintGlobalNameIfASCII(&str, g); PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data()); Printf("%s", str.data());
}
static bool DescribeAddressIfGlobal(uptr addr, uptr size,
const char *bug_type) {
// Assume address is close to at most four globals.
const int kMaxGlobalsInReport = 4;
__asan_global globals[kMaxGlobalsInReport];
u32 reg_sites[kMaxGlobalsInReport];
int globals_num =
GetGlobalsForAddress(addr, globals, reg_sites, ARRAY_SIZE(globals));
if (globals_num == 0)
return false;
for (int i = 0; i < globals_num; i++) {
DescribeAddressRelativeToGlobal(addr, size, globals[i]);
if (0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
reg_sites[i]) {
Printf(" registered at:\n");
StackDepotGet(reg_sites[i]).Print();
}
}
return true; return true;
} }
@ -348,7 +372,7 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
uptr next_var_beg) { uptr next_var_beg) {
uptr var_end = var.beg + var.size; uptr var_end = var.beg + var.size;
uptr addr_end = addr + access_size; uptr addr_end = addr + access_size;
const char *pos_descr = 0; const char *pos_descr = nullptr;
// If the variable [var.beg, var_end) is the nearest variable to the // If the variable [var.beg, var_end) is the nearest variable to the
// current memory access, indicate it in the log. // current memory access, indicate it in the log.
if (addr >= var.beg) { if (addr >= var.beg) {
@ -519,7 +543,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
StackTrace alloc_stack = chunk.GetAllocStack(); StackTrace alloc_stack = chunk.GetAllocStack();
char tname[128]; char tname[128];
Decorator d; Decorator d;
AsanThreadContext *free_thread = 0; AsanThreadContext *free_thread = nullptr;
if (chunk.FreeTid() != kInvalidTid) { if (chunk.FreeTid() != kInvalidTid) {
free_thread = GetThreadContextByTidLocked(chunk.FreeTid()); free_thread = GetThreadContextByTidLocked(chunk.FreeTid());
Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(), Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
@ -545,12 +569,12 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
DescribeThread(alloc_thread); DescribeThread(alloc_thread);
} }
void DescribeAddress(uptr addr, uptr access_size) { static void DescribeAddress(uptr addr, uptr access_size, const char *bug_type) {
// Check if this is shadow or shadow gap. // Check if this is shadow or shadow gap.
if (DescribeAddressIfShadow(addr)) if (DescribeAddressIfShadow(addr))
return; return;
CHECK(AddrIsInMem(addr)); CHECK(AddrIsInMem(addr));
if (DescribeAddressIfGlobal(addr, access_size)) if (DescribeAddressIfGlobal(addr, access_size, bug_type))
return; return;
if (DescribeAddressIfStack(addr, access_size)) if (DescribeAddressIfStack(addr, access_size))
return; return;
@ -572,6 +596,11 @@ void DescribeThread(AsanThreadContext *context) {
InternalScopedString str(1024); InternalScopedString str(1024);
str.append("Thread T%d%s", context->tid, str.append("Thread T%d%s", context->tid,
ThreadNameWithParenthesis(context->tid, tname, sizeof(tname))); ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
if (context->parent_tid == kInvalidTid) {
str.append(" created by unknown thread\n");
Printf("%s", str.data());
return;
}
str.append( str.append(
" created by T%d%s here:\n", context->parent_tid, " created by T%d%s here:\n", context->parent_tid,
ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname))); ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
@ -609,7 +638,7 @@ class ScopedInErrorReport {
} }
// If we're still not dead for some reason, use raw _exit() instead of // If we're still not dead for some reason, use raw _exit() instead of
// Die() to bypass any additional checks. // Die() to bypass any additional checks.
internal__exit(flags()->exitcode); internal__exit(common_flags()->exitcode);
} }
if (report) report_data = *report; if (report) report_data = *report;
report_happened = true; report_happened = true;
@ -641,40 +670,39 @@ class ScopedInErrorReport {
} }
}; };
void ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr) { void ReportStackOverflow(const SignalContext &sig) {
ScopedInErrorReport in_report; ScopedInErrorReport in_report;
Decorator d; Decorator d;
Printf("%s", d.Warning()); Printf("%s", d.Warning());
Report( Report(
"ERROR: AddressSanitizer: stack-overflow on address %p" "ERROR: AddressSanitizer: stack-overflow on address %p"
" (pc %p bp %p sp %p T%d)\n", " (pc %p bp %p sp %p T%d)\n",
(void *)addr, (void *)pc, (void *)bp, (void *)sp, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp,
GetCurrentTidOrInvalid()); GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning()); Printf("%s", d.EndWarning());
GET_STACK_TRACE_SIGNAL(pc, bp, context); GET_STACK_TRACE_SIGNAL(sig);
stack.Print(); stack.Print();
ReportErrorSummary("stack-overflow", &stack); ReportErrorSummary("stack-overflow", &stack);
} }
void ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp, void ReportDeadlySignal(const char *description, const SignalContext &sig) {
void *context, uptr addr) {
ScopedInErrorReport in_report; ScopedInErrorReport in_report;
Decorator d; Decorator d;
Printf("%s", d.Warning()); Printf("%s", d.Warning());
Report( Report(
"ERROR: AddressSanitizer: %s on unknown address %p" "ERROR: AddressSanitizer: %s on unknown address %p"
" (pc %p bp %p sp %p T%d)\n", " (pc %p bp %p sp %p T%d)\n",
description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp,
GetCurrentTidOrInvalid()); (void *)sig.sp, GetCurrentTidOrInvalid());
if (pc < GetPageSizeCached()) { if (sig.pc < GetPageSizeCached()) {
Report("Hint: pc points to the zero page.\n"); Report("Hint: pc points to the zero page.\n");
} }
Printf("%s", d.EndWarning()); Printf("%s", d.EndWarning());
GET_STACK_TRACE_SIGNAL(pc, bp, context); GET_STACK_TRACE_SIGNAL(sig);
stack.Print(); stack.Print();
MaybeDumpInstructionBytes(pc); MaybeDumpInstructionBytes(sig.pc);
Printf("AddressSanitizer can not provide additional info.\n"); Printf("AddressSanitizer can not provide additional info.\n");
ReportErrorSummary("SEGV", &stack); ReportErrorSummary(description, &stack);
} }
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
@ -800,8 +828,8 @@ void ReportStringFunctionMemoryRangesOverlap(const char *function,
bug_type, offset1, offset1 + length1, offset2, offset2 + length2); bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
Printf("%s", d.EndWarning()); Printf("%s", d.EndWarning());
stack->Print(); stack->Print();
DescribeAddress((uptr)offset1, length1); DescribeAddress((uptr)offset1, length1, bug_type);
DescribeAddress((uptr)offset2, length2); DescribeAddress((uptr)offset2, length2, bug_type);
ReportErrorSummary(bug_type, stack); ReportErrorSummary(bug_type, stack);
} }
@ -814,7 +842,7 @@ void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size); Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
Printf("%s", d.EndWarning()); Printf("%s", d.EndWarning());
stack->Print(); stack->Print();
DescribeAddress(offset, size); DescribeAddress(offset, size, bug_type);
ReportErrorSummary(bug_type, stack); ReportErrorSummary(bug_type, stack);
} }
@ -829,6 +857,9 @@ void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
" old_mid : %p\n" " old_mid : %p\n"
" new_mid : %p\n", " new_mid : %p\n",
beg, end, old_mid, new_mid); beg, end, old_mid, new_mid);
uptr granularity = SHADOW_GRANULARITY;
if (!IsAligned(beg, granularity))
Report("ERROR: beg is not aligned by %d\n", granularity);
stack->Print(); stack->Print();
ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack); ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
} }
@ -866,15 +897,16 @@ void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
static NOINLINE void static NOINLINE void
ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) { ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) {
ScopedInErrorReport in_report; ScopedInErrorReport in_report;
const char *bug_type = "invalid-pointer-pair";
Decorator d; Decorator d;
Printf("%s", d.Warning()); Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2); Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2);
Printf("%s", d.EndWarning()); Printf("%s", d.EndWarning());
GET_STACK_TRACE_FATAL(pc, bp); GET_STACK_TRACE_FATAL(pc, bp);
stack.Print(); stack.Print();
DescribeAddress(a1, 1); DescribeAddress(a1, 1, bug_type);
DescribeAddress(a2, 1); DescribeAddress(a2, 1, bug_type);
ReportErrorSummary("invalid-pointer-pair", &stack); ReportErrorSummary(bug_type, &stack);
} }
static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) { static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
@ -925,13 +957,24 @@ void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
DescribeHeapAddress(addr, 1); DescribeHeapAddress(addr, 1);
} }
} // namespace __asan } // namespace __asan
// --------------------------- Interface --------------------- {{{1 // --------------------------- Interface --------------------- {{{1
using namespace __asan; // NOLINT using namespace __asan; // NOLINT
void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
uptr access_size) { uptr access_size, u32 exp) {
ENABLE_FRAME_POINTER;
// Optimization experiments.
// The experiments can be used to evaluate potential optimizations that remove
// instrumentation (assess false negatives). Instead of completely removing
// some instrumentation, compiler can emit special calls into runtime
// (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass
// mask of experiments (exp).
// The reaction to a non-zero value of exp is to be defined.
(void)exp;
// Determine the error type. // Determine the error type.
const char *bug_descr = "unknown-crash"; const char *bug_descr = "unknown-crash";
if (AddrIsInMem(addr)) { if (AddrIsInMem(addr)) {
@ -980,6 +1023,10 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
case kAsanIntraObjectRedzone: case kAsanIntraObjectRedzone:
bug_descr = "intra-object-overflow"; bug_descr = "intra-object-overflow";
break; break;
case kAsanAllocaLeftMagic:
case kAsanAllocaRightMagic:
bug_descr = "dynamic-stack-buffer-overflow";
break;
} }
} }
@ -1006,7 +1053,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
GET_STACK_TRACE_FATAL(pc, bp); GET_STACK_TRACE_FATAL(pc, bp);
stack.Print(); stack.Print();
DescribeAddress(addr, access_size); DescribeAddress(addr, access_size, bug_descr);
ReportErrorSummary(bug_descr, &stack); ReportErrorSummary(bug_descr, &stack);
PrintShadowMemoryForAddress(addr); PrintShadowMemoryForAddress(addr);
} }
@ -1024,7 +1071,7 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
void __asan_describe_address(uptr addr) { void __asan_describe_address(uptr addr) {
// Thread registry must be locked while we're describing an address. // Thread registry must be locked while we're describing an address.
asanThreadRegistry().Lock(); asanThreadRegistry().Lock();
DescribeAddress(addr, 1); DescribeAddress(addr, 1, "");
asanThreadRegistry().Unlock(); asanThreadRegistry().Unlock();
} }
@ -1069,7 +1116,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) { void __sanitizer_ptr_cmp(void *a, void *b) {
CheckForInvalidPointerPair(a, b); CheckForInvalidPointerPair(a, b);
} }
} // extern "C" } // extern "C"
#if !SANITIZER_SUPPORTS_WEAK_HOOKS #if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing // Provide default implementation of __asan_on_error that does nothing

View File

@ -31,29 +31,25 @@ struct AddressDescription {
const char *region_kind; const char *region_kind;
}; };
// Returns the number of globals close to the provided address and copies
// them to "globals" array.
int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
int max_globals);
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
// The following functions prints address description depending // The following functions prints address description depending
// on the memory type (shadow/heap/stack/global). // on the memory type (shadow/heap/stack/global).
void DescribeHeapAddress(uptr addr, uptr access_size); void DescribeHeapAddress(uptr addr, uptr access_size);
bool DescribeAddressIfGlobal(uptr addr, uptr access_size);
bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g);
bool IsAddressNearGlobal(uptr addr, const __asan_global &g);
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr, bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
bool print = true); bool print = true);
bool ParseFrameDescription(const char *frame_descr, bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars); InternalMmapVector<StackVarDescr> *vars);
bool DescribeAddressIfStack(uptr addr, uptr access_size); bool DescribeAddressIfStack(uptr addr, uptr access_size);
// Determines memory type on its own.
void DescribeAddress(uptr addr, uptr access_size);
void DescribeThread(AsanThreadContext *context); void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports. // Different kinds of error reports.
void NORETURN void NORETURN ReportStackOverflow(const SignalContext &sig);
ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr); void NORETURN ReportDeadlySignal(const char* description,
void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp, const SignalContext &sig);
void *context, uptr addr);
void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack); BufferedStackTrace *free_stack);
void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack); void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);

View File

@ -9,6 +9,7 @@
// //
// Main file of the ASan run-time library. // Main file of the ASan run-time library.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "asan_activation.h" #include "asan_activation.h"
#include "asan_allocator.h" #include "asan_allocator.h"
#include "asan_interceptors.h" #include "asan_interceptors.h"
@ -19,12 +20,15 @@
#include "asan_report.h" #include "asan_report.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "asan_stats.h" #include "asan_stats.h"
#include "asan_suppressions.h"
#include "asan_thread.h" #include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h" #include "sanitizer_common/sanitizer_symbolizer.h"
#include "lsan/lsan_common.h" #include "lsan/lsan_common.h"
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"
int __asan_option_detect_stack_use_after_return; // Global interface symbol. int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan. uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
@ -51,13 +55,6 @@ static void AsanDie() {
UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
} }
} }
if (common_flags()->coverage)
__sanitizer_cov_dump();
if (death_callback)
death_callback();
if (flags()->abort_on_error)
Abort();
internal__exit(flags()->exitcode);
} }
static void AsanCheckFailed(const char *file, int line, const char *cond, static void AsanCheckFailed(const char *file, int line, const char *cond,
@ -69,265 +66,9 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
Die(); Die();
} }
// -------------------------- Flags ------------------------- {{{1
static const int kDefaultMallocContextSize = 30;
Flags asan_flags_dont_use_directly; // use via flags().
static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
}
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
#else
return "";
#endif
}
static void ParseFlagsFromString(Flags *f, const char *str) {
CommonFlags *cf = common_flags();
ParseCommonFlagsFromString(cf, str);
CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
// Please write meaningful flag descriptions when adding new flags.
ParseFlag(str, &f->quarantine_size, "quarantine_size",
"Size (in bytes) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.");
ParseFlag(str, &f->redzone, "redzone",
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.");
ParseFlag(str, &f->max_redzone, "max_redzone",
"Maximal size (in bytes) of redzones around heap objects.");
CHECK_GE(f->redzone, 16);
CHECK_GE(f->max_redzone, f->redzone);
CHECK_LE(f->max_redzone, 2048);
CHECK(IsPowerOfTwo(f->redzone));
CHECK(IsPowerOfTwo(f->max_redzone));
ParseFlag(str, &f->debug, "debug",
"If set, prints some debugging information and does additional checks.");
ParseFlag(str, &f->report_globals, "report_globals",
"Controls the way to handle globals (0 - don't detect buffer overflow on "
"globals, 1 - detect buffer overflow, 2 - print data about registered "
"globals).");
ParseFlag(str, &f->check_initialization_order,
"check_initialization_order",
"If set, attempts to catch initialization order issues.");
ParseFlag(str, &f->replace_str, "replace_str",
"If set, uses custom wrappers and replacements for libc string functions "
"to find more errors.");
ParseFlag(str, &f->replace_intrin, "replace_intrin",
"If set, uses custom wrappers for memset/memcpy/memmove intinsics.");
ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free",
"Ignore invalid free() calls to work around some bugs. Used on OS X "
"only.");
ParseFlag(str, &f->detect_stack_use_after_return,
"detect_stack_use_after_return",
"Enables stack-use-after-return checking at run-time.");
ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log",
"Minimum fake stack size log.");
ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log",
"Maximum fake stack size log.");
ParseFlag(str, &f->uar_noreserve, "uar_noreserve",
"Use mmap with 'norserve' flag to allocate fake stack.");
ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size",
"ASan allocator flag. max_malloc_fill_size is the maximal amount of "
"bytes that will be filled with malloc_fill_byte on malloc.");
ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte",
"Value used to fill the newly allocated memory.");
ParseFlag(str, &f->exitcode, "exitcode",
"Override the program exit status if the tool found an error.");
ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning",
"If set, user may manually mark memory regions as poisoned or "
"unpoisoned.");
ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying",
"Number of seconds to sleep between printing an error report and "
"terminating the program. Useful for debugging purposes (e.g. when one "
"needs to attach gdb).");
ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size",
"Allows the users to work around the bug in Nvidia drivers prior to "
"295.*.");
ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit",
"If set, explicitly unmaps the (huge) shadow at exit.");
ParseFlag(str, &f->abort_on_error, "abort_on_error",
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.");
ParseFlag(str, &f->print_stats, "print_stats",
"Print various statistics after printing an error message or if "
"atexit=1.");
ParseFlag(str, &f->print_legend, "print_legend",
"Print the legend for the shadow bytes.");
ParseFlag(str, &f->atexit, "atexit",
"If set, prints ASan exit stats even after program terminates "
"successfully.");
ParseFlag(str, &f->allow_reexec, "allow_reexec",
"Allow the tool to re-exec the program. This may interfere badly with "
"the debugger.");
ParseFlag(str, &f->print_full_thread_history,
"print_full_thread_history",
"If set, prints thread creation stacks for the threads involved in the "
"report and their ancestors up to the main thread.");
ParseFlag(str, &f->poison_heap, "poison_heap",
"Poison (or not) the heap memory on [de]allocation. Zero value is useful "
"for benchmarking the allocator or instrumentator.");
ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie",
"Poison (or not) the array cookie after operator new[].");
ParseFlag(str, &f->poison_partial, "poison_partial",
"If true, poison partially addressable 8-byte aligned words "
"(default=true). This flag affects heap and global buffers, but not "
"stack buffers.");
ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
"Report errors on malloc/delete, new/free, new/delete[], etc.");
ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch",
"Report errors on mismatch betwen size of new and delete.");
ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
"If true, assume that memcmp(p1, p2, n) always reads n bytes before "
"comparing p1 and p2.");
ParseFlag(str, &f->strict_init_order, "strict_init_order",
"If true, assume that dynamic initializers can never access globals from "
"other modules, even if the latter are already initialized.");
ParseFlag(str, &f->start_deactivated, "start_deactivated",
"If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
"poisoning) to reduce memory consumption as much as possible, and "
"restores them to original values when the first instrumented module is "
"loaded into the process. This is mainly intended to be used on "
"Android. ");
ParseFlag(str, &f->detect_invalid_pointer_pairs,
"detect_invalid_pointer_pairs",
"If non-zero, try to detect operations like <, <=, >, >= and - on "
"invalid pointer pairs (e.g. when pointers belong to different objects). "
"The bigger the value the harder we try.");
ParseFlag(str, &f->detect_container_overflow,
"detect_container_overflow",
"If true, honor the container overflow annotations. "
"See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow");
ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation",
"If >=2, detect violation of One-Definition-Rule (ODR); "
"If ==1, detect ODR-violation only if the two variables "
"have different sizes");
ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes",
"If true, dump 16 bytes starting at the instruction that caused SEGV");
}
// Establish defaults for both the common sanitizer flags and the ASan-specific
// flags in |f|, then apply overrides in increasing priority order:
// compile-time definition, __asan_default_options(), and finally the
// ASAN_OPTIONS environment string passed in |env|.
void InitializeFlags(Flags *f, const char *env) {
  // Start from sanitizer_common defaults, then apply ASan-specific tweaks to
  // the shared flags.
  CommonFlags *cf = common_flags();
  SetCommonFlagsDefaults(cf);
  cf->detect_leaks = CAN_SANITIZE_LEAKS;
  cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
  cf->malloc_context_size = kDefaultMallocContextSize;
  cf->intercept_tls_get_addr = true;
  cf->coverage = false;

  // ASan-specific flag defaults. |f| is zeroed first so any flag not set
  // below starts as 0/false.
  internal_memset(f, 0, sizeof(*f));
  f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
  f->redzone = 16;
  f->max_redzone = 2048;
  f->debug = false;
  f->report_globals = 1;
  f->check_initialization_order = false;
  f->replace_str = true;
  f->replace_intrin = true;
  f->mac_ignore_invalid_free = false;
  f->detect_stack_use_after_return = false;  // Also needs the compiler flag.
  f->min_uar_stack_size_log = 16;  // We can't do smaller anyway.
  f->max_uar_stack_size_log = 20;  // 1Mb per size class, i.e. ~11Mb per thread.
  f->uar_noreserve = false;
  f->max_malloc_fill_size = 0x1000;  // By default, fill only the first 4K.
  f->malloc_fill_byte = 0xbe;
  f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
  f->allow_user_poisoning = true;
  f->sleep_before_dying = 0;
  f->check_malloc_usable_size = true;
  f->unmap_shadow_on_exit = false;
  f->abort_on_error = false;
  f->print_stats = false;
  f->print_legend = true;
  f->atexit = false;
  f->allow_reexec = true;
  f->print_full_thread_history = true;
  f->poison_heap = true;
  f->poison_array_cookie = true;
  f->poison_partial = true;
  // Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
  // https://code.google.com/p/address-sanitizer/issues/detail?id=131
  // https://code.google.com/p/address-sanitizer/issues/detail?id=309
  // TODO(glider,timurrrr): Fix known issues and enable this back.
  f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
  f->new_delete_type_mismatch = true;
  f->strict_memcmp = true;
  f->strict_init_order = false;
  f->start_deactivated = false;
  f->detect_invalid_pointer_pairs = 0;
  f->detect_container_overflow = true;
  f->detect_odr_violation = 2;
  f->dump_instruction_bytes = false;

  // Override from compile definition.
  ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
  // Override from user-specified string.
  ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
  VReport(1, "Using the defaults from __asan_default_options: %s\n",
          MaybeCallAsanDefaultOptions());
  // Override from command line.
  ParseFlagsFromString(f, env);

  if (common_flags()->help) {
    PrintFlagDescriptions();
  }

  // Leak detection needs platform support; downgrade the request with a
  // warning instead of failing.
  if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) {
    Report("%s: detect_leaks is not supported on this platform.\n",
           SanitizerToolName);
    cf->detect_leaks = false;
  }

  // Make "strict_init_order" imply "check_initialization_order".
  // TODO(samsonov): Use a single runtime flag for an init-order checker.
  if (f->strict_init_order) {
    f->check_initialization_order = true;
  }
}
// Parse flags that may change between startup and activation.
// On Android they come from a system property; on other platforms the
// platform hook yields an empty string and this is a no-op.
void ParseExtraActivationFlags() {
  char extra[100];
  GetExtraActivationFlags(extra, sizeof(extra));
  ParseFlagsFromString(flags(), extra);
  const bool have_extra_flags = extra[0] != '\0';
  if (have_extra_flags)
    VReport(1, "Extra activation flags: %s\n", extra);
}
// -------------------------- Globals --------------------- {{{1 // -------------------------- Globals --------------------- {{{1
int asan_inited; int asan_inited;
bool asan_init_is_running; bool asan_init_is_running;
void (*death_callback)(void);
#if !ASAN_FIXED_MAPPING #if !ASAN_FIXED_MAPPING
uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
@ -341,17 +82,22 @@ void ShowStatsAndAbort() {
// ---------------------- mmap -------------------- {{{1 // ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end]. // Reserve memory range [beg, end].
static void ReserveShadowMemoryRange(uptr beg, uptr end) { // We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
CHECK_EQ((beg % GetPageSizeCached()), 0); CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0); CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
uptr size = end - beg + 1; uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size); void *res = MmapFixedNoReserve(beg, size, name);
if (res != (void*)beg) { if (res != (void*)beg) {
Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. " Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n", size); "Perhaps you're using ulimit -v\n", size);
Abort(); Abort();
} }
if (common_flags()->no_huge_pages_for_shadow)
NoHugePagesInRegion(beg, size);
if (common_flags()->use_madv_dontdump)
DontDumpShadowMemory(beg, size);
} }
// --------------- LowLevelAllocateCallbac ---------- {{{1 // --------------- LowLevelAllocateCallbac ---------- {{{1
@ -362,11 +108,15 @@ static void OnLowLevelAllocate(uptr ptr, uptr size) {
// -------------------------- Run-time entry ------------------- {{{1 // -------------------------- Run-time entry ------------------- {{{1
// exported functions // exported functions
#define ASAN_REPORT_ERROR(type, is_write, size) \ #define ASAN_REPORT_ERROR(type, is_write, size) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_report_ ## type ## size(uptr addr); \ void __asan_report_ ## type ## size(uptr addr) { \
void __asan_report_ ## type ## size(uptr addr) { \
GET_CALLER_PC_BP_SP; \ GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size); \ __asan_report_error(pc, bp, sp, addr, is_write, size, 0); \
} \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \
GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size, exp); \
} }
ASAN_REPORT_ERROR(load, false, 1) ASAN_REPORT_ERROR(load, false, 1)
@ -382,18 +132,20 @@ ASAN_REPORT_ERROR(store, true, 16)
#define ASAN_REPORT_ERROR_N(type, is_write) \ #define ASAN_REPORT_ERROR_N(type, is_write) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_report_ ## type ## _n(uptr addr, uptr size); \
void __asan_report_ ## type ## _n(uptr addr, uptr size) { \ void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
GET_CALLER_PC_BP_SP; \ GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size); \ __asan_report_error(pc, bp, sp, addr, is_write, size, 0); \
} \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \
GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size, exp); \
} }
ASAN_REPORT_ERROR_N(load, false) ASAN_REPORT_ERROR_N(load, false)
ASAN_REPORT_ERROR_N(store, true) ASAN_REPORT_ERROR_N(store, true)
#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ #define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_##type##size(uptr addr); \
void __asan_##type##size(uptr addr) { \
uptr sp = MEM_TO_SHADOW(addr); \ uptr sp = MEM_TO_SHADOW(addr); \
uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \ uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
: *reinterpret_cast<u16 *>(sp); \ : *reinterpret_cast<u16 *>(sp); \
@ -405,10 +157,19 @@ ASAN_REPORT_ERROR_N(store, true)
*__asan_test_only_reported_buggy_pointer = addr; \ *__asan_test_only_reported_buggy_pointer = addr; \
} else { \ } else { \
GET_CALLER_PC_BP_SP; \ GET_CALLER_PC_BP_SP; \
__asan_report_error(pc, bp, sp, addr, is_write, size); \ __asan_report_error(pc, bp, sp, addr, is_write, size, exp_arg); \
} \ } \
} \ } \
} \ }
#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_##type##size(uptr addr) { \
ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0) \
} \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
void __asan_exp_##type##size(uptr addr, u32 exp) { \
ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp) \
} }
ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1) ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1)
@ -423,18 +184,38 @@ ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8)
ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16) ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16)
extern "C" extern "C"
NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) { NOINLINE INTERFACE_ATTRIBUTE
void __asan_loadN(uptr addr, uptr size) {
if (__asan_region_is_poisoned(addr, size)) { if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP; GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, false, size); __asan_report_error(pc, bp, sp, addr, false, size, 0);
} }
} }
extern "C" extern "C"
NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) { NOINLINE INTERFACE_ATTRIBUTE
void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
if (__asan_region_is_poisoned(addr, size)) { if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP; GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, true, size); __asan_report_error(pc, bp, sp, addr, false, size, exp);
}
}
extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_storeN(uptr addr, uptr size) {
if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, true, size, 0);
}
}
extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
if (__asan_region_is_poisoned(addr, size)) {
GET_CALLER_PC_BP_SP;
__asan_report_error(pc, bp, sp, addr, true, size, exp);
} }
} }
@ -453,26 +234,39 @@ static NOINLINE void force_interface_symbols() {
case 3: __asan_report_load4(0); break; case 3: __asan_report_load4(0); break;
case 4: __asan_report_load8(0); break; case 4: __asan_report_load8(0); break;
case 5: __asan_report_load16(0); break; case 5: __asan_report_load16(0); break;
case 6: __asan_report_store1(0); break; case 6: __asan_report_load_n(0, 0); break;
case 7: __asan_report_store2(0); break; case 7: __asan_report_store1(0); break;
case 8: __asan_report_store4(0); break; case 8: __asan_report_store2(0); break;
case 9: __asan_report_store8(0); break; case 9: __asan_report_store4(0); break;
case 10: __asan_report_store16(0); break; case 10: __asan_report_store8(0); break;
case 12: __asan_register_globals(0, 0); break; case 11: __asan_report_store16(0); break;
case 13: __asan_unregister_globals(0, 0); break; case 12: __asan_report_store_n(0, 0); break;
case 14: __asan_set_death_callback(0); break; case 13: __asan_report_exp_load1(0, 0); break;
case 15: __asan_set_error_report_callback(0); break; case 14: __asan_report_exp_load2(0, 0); break;
case 16: __asan_handle_no_return(); break; case 15: __asan_report_exp_load4(0, 0); break;
case 17: __asan_address_is_poisoned(0); break; case 16: __asan_report_exp_load8(0, 0); break;
case 25: __asan_poison_memory_region(0, 0); break; case 17: __asan_report_exp_load16(0, 0); break;
case 26: __asan_unpoison_memory_region(0, 0); break; case 18: __asan_report_exp_load_n(0, 0, 0); break;
case 27: __asan_set_error_exit_code(0); break; case 19: __asan_report_exp_store1(0, 0); break;
case 30: __asan_before_dynamic_init(0); break; case 20: __asan_report_exp_store2(0, 0); break;
case 31: __asan_after_dynamic_init(); break; case 21: __asan_report_exp_store4(0, 0); break;
case 32: __asan_poison_stack_memory(0, 0); break; case 22: __asan_report_exp_store8(0, 0); break;
case 33: __asan_unpoison_stack_memory(0, 0); break; case 23: __asan_report_exp_store16(0, 0); break;
case 34: __asan_region_is_poisoned(0, 0); break; case 24: __asan_report_exp_store_n(0, 0, 0); break;
case 35: __asan_describe_address(0); break; case 25: __asan_register_globals(nullptr, 0); break;
case 26: __asan_unregister_globals(nullptr, 0); break;
case 27: __asan_set_death_callback(nullptr); break;
case 28: __asan_set_error_report_callback(nullptr); break;
case 29: __asan_handle_no_return(); break;
case 30: __asan_address_is_poisoned(nullptr); break;
case 31: __asan_poison_memory_region(nullptr, 0); break;
case 32: __asan_unpoison_memory_region(nullptr, 0); break;
case 34: __asan_before_dynamic_init(nullptr); break;
case 35: __asan_after_dynamic_init(); break;
case 36: __asan_poison_stack_memory(0, 0); break;
case 37: __asan_unpoison_stack_memory(0, 0); break;
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
} }
} }
@ -496,8 +290,28 @@ static void InitializeHighMemEnd() {
CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0); CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0);
} }
static void ProtectGap(uptr a, uptr size) { static void ProtectGap(uptr addr, uptr size) {
CHECK_EQ(a, (uptr)Mprotect(a, size)); void *res = MmapNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
// A few pages at the start of the address space can not be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
if (addr == kZeroBaseShadowStart) {
uptr step = GetPageSizeCached();
while (size > step && addr < kZeroBaseMaxShadowStart) {
addr += step;
size -= step;
void *res = MmapNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
}
}
Report("ERROR: Failed to protect the shadow gap. "
"ASan cannot proceed correctly. ABORTING.\n");
DumpProcessMap();
Die();
} }
static void PrintAddressSpaceLayout() { static void PrintAddressSpaceLayout() {
@ -536,13 +350,13 @@ static void PrintAddressSpaceLayout() {
Printf("\n"); Printf("\n");
Printf("redzone=%zu\n", (uptr)flags()->redzone); Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone); Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20); Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
Printf("malloc_context_size=%zu\n", Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size); (uptr)common_flags()->malloc_context_size);
Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE); Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY); Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET); Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
if (kMidMemBeg) if (kMidMemBeg)
CHECK(kMidShadowBeg > kLowShadowEnd && CHECK(kMidShadowBeg > kLowShadowEnd &&
@ -556,10 +370,19 @@ static void AsanInitInternal() {
CHECK(!asan_init_is_running && "ASan init calls itself!"); CHECK(!asan_init_is_running && "ASan init calls itself!");
asan_init_is_running = true; asan_init_is_running = true;
CacheBinaryName();
// Initialize flags. This must be done early, because most of the // Initialize flags. This must be done early, because most of the
// initialization steps look at flags(). // initialization steps look at flags().
const char *options = GetEnv("ASAN_OPTIONS"); InitializeFlags();
InitializeFlags(flags(), options);
CheckVMASize();
AsanCheckIncompatibleRT();
AsanCheckDynamicRTPrereqs();
SetCanPoisonMemory(flags()->poison_heap);
SetMallocContextSize(common_flags()->malloc_context_size);
InitializeHighMemEnd(); InitializeHighMemEnd();
@ -567,24 +390,15 @@ static void AsanInitInternal() {
AsanDoesNotSupportStaticLinkage(); AsanDoesNotSupportStaticLinkage();
// Install tool-specific callbacks in sanitizer_common. // Install tool-specific callbacks in sanitizer_common.
SetDieCallback(AsanDie); AddDieCallback(AsanDie);
SetCheckFailedCallback(AsanCheckFailed); SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer); SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
if (!flags()->start_deactivated)
ParseExtraActivationFlags();
__sanitizer_set_report_path(common_flags()->log_path); __sanitizer_set_report_path(common_flags()->log_path);
// Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return = __asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return; flags()->detect_stack_use_after_return;
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
if (options) {
VReport(1, "Parsed ASAN_OPTIONS: %s\n", options);
}
if (flags()->start_deactivated)
AsanStartDeactivated();
// Re-exec ourselves if we need to set additional env or command line args. // Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec(); MaybeReexec();
@ -615,17 +429,16 @@ static void AsanInitInternal() {
} }
#endif #endif
if (common_flags()->verbosity) if (Verbosity()) PrintAddressSpaceLayout();
PrintAddressSpaceLayout();
DisableCoreDumperIfNecessary(); DisableCoreDumperIfNecessary();
if (full_shadow_is_available) { if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left. // mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg) if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the high shadow. // mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gap. // protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
@ -634,11 +447,11 @@ static void AsanInitInternal() {
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) { MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd); CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left. // mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the mid shadow. // mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd); ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
// mmap the high shadow. // mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gaps. // protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
@ -646,14 +459,21 @@ static void AsanInitInternal() {
} else { } else {
Report("Shadow memory range interleaves with an existing memory mapping. " Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n"); "ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
shadow_start, kHighShadowEnd);
DumpProcessMap(); DumpProcessMap();
Die(); Die();
} }
AsanTSDInit(PlatformTSDDtor); AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnSIGSEGV); InstallDeadlySignalHandlers(AsanOnDeadlySignal);
InitializeAllocator(); AllocatorOptions allocator_options;
allocator_options.SetFrom(flags(), common_flags());
InitializeAllocator(allocator_options);
MaybeStartBackgroudThread();
SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads. // should be set to 1 prior to initializing the threads.
@ -663,32 +483,40 @@ static void AsanInitInternal() {
if (flags()->atexit) if (flags()->atexit)
Atexit(asan_atexit); Atexit(asan_atexit);
if (common_flags()->coverage) { InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
__sanitizer_cov_init();
Atexit(__sanitizer_cov_dump); // Now that ASan runtime is (mostly) initialized, deactivate it if
} // necessary, so that it can be re-activated when requested.
if (flags()->start_deactivated)
AsanDeactivate();
// interceptors // interceptors
InitTlsSize(); InitTlsSize();
// Create main thread. // Create main thread.
AsanThread *main_thread = AsanThread::Create(0, 0); AsanThread *main_thread = AsanThread::Create(
CreateThreadContextArgs create_main_args = { main_thread, 0 }; /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
u32 main_tid = asanThreadRegistry().CreateThread( /* stack */ nullptr, /* detached */ true);
0, true, 0, &create_main_args); CHECK_EQ(0, main_thread->tid());
CHECK_EQ(0, main_tid);
SetCurrentThread(main_thread); SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid()); main_thread->ThreadStart(internal_getpid(),
/* signal_thread_is_registered */ nullptr);
force_interface_symbols(); // no-op. force_interface_symbols(); // no-op.
SanitizerInitializeUnwinder(); SanitizerInitializeUnwinder();
#if CAN_SANITIZE_LEAKS #if CAN_SANITIZE_LEAKS
__lsan::InitCommonLsan(false); __lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck); Atexit(__lsan::DoLeakCheck);
} }
#endif // CAN_SANITIZE_LEAKS #endif // CAN_SANITIZE_LEAKS
#if CAN_SANITIZE_UB
__ubsan::InitAsPlugin();
#endif
InitializeSuppressions();
VReport(1, "AddressSanitizer Init done\n"); VReport(1, "AddressSanitizer Init done\n");
} }
@ -700,46 +528,38 @@ void AsanInitFromRtl() {
#if ASAN_DYNAMIC #if ASAN_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable // Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable
// (and thus normal initializer from .preinit_array haven't run). // (and thus normal initializers from .preinit_array or modules haven't run).
class AsanInitializer { class AsanInitializer {
public: // NOLINT public: // NOLINT
AsanInitializer() { AsanInitializer() {
AsanCheckIncompatibleRT(); AsanInitFromRtl();
AsanCheckDynamicRTPrereqs();
if (UNLIKELY(!asan_inited))
__asan_init();
} }
}; };
static AsanInitializer asan_initializer; static AsanInitializer asan_initializer;
#endif // ASAN_DYNAMIC #endif // ASAN_DYNAMIC
} // namespace __asan } // namespace __asan
// ---------------------- Interface ---------------- {{{1 // ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT using namespace __asan; // NOLINT
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __asan_default_options() { return ""; }
} // extern "C"
#endif
int NOINLINE __asan_set_error_exit_code(int exit_code) {
int old = flags()->exitcode;
flags()->exitcode = exit_code;
return old;
}
void NOINLINE __asan_handle_no_return() { void NOINLINE __asan_handle_no_return() {
int local_stack; int local_stack;
AsanThread *curr_thread = GetCurrentThread(); AsanThread *curr_thread = GetCurrentThread();
CHECK(curr_thread);
uptr PageSize = GetPageSizeCached(); uptr PageSize = GetPageSizeCached();
uptr top = curr_thread->stack_top(); uptr top, bottom;
uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1); if (curr_thread) {
top = curr_thread->stack_top();
bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
} else {
// If we haven't seen this thread, try asking the OS for stack bounds.
uptr tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr,
&tls_size);
top = bottom + stack_size;
}
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
if (top - bottom > kMaxExpectedCleanupSize) { if (top - bottom > kMaxExpectedCleanupSize) {
static bool reported_warning = false; static bool reported_warning = false;
@ -755,18 +575,21 @@ void NOINLINE __asan_handle_no_return() {
return; return;
} }
PoisonShadow(bottom, top - bottom, 0); PoisonShadow(bottom, top - bottom, 0);
if (curr_thread->has_fake_stack()) if (curr_thread && curr_thread->has_fake_stack())
curr_thread->fake_stack()->HandleNoReturn(); curr_thread->fake_stack()->HandleNoReturn();
} }
void NOINLINE __asan_set_death_callback(void (*callback)(void)) { void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
death_callback = callback; SetUserDieCallback(callback);
} }
// Initialize as requested from instrumented application code. // Initialize as requested from instrumented application code.
// We use this call as a trigger to wake up ASan from deactivated state. // We use this call as a trigger to wake up ASan from deactivated state.
void __asan_init() { void __asan_init() {
AsanCheckIncompatibleRT();
AsanActivate(); AsanActivate();
AsanInitInternal(); AsanInitInternal();
} }
void __asan_version_mismatch_check() {
// Do nothing.
}

View File

@ -11,6 +11,21 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_stack.h" #include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
namespace __asan {

// Number of frames to record in malloc/free stack traces.  Kept in an atomic
// because it can be updated at runtime (SetMallocContextSize) while readers
// concurrently query it; the release/acquire pair orders the update against
// those reads.
static atomic_uint32_t malloc_context_size;

// Publish a new malloc stack-trace depth.
void SetMallocContextSize(u32 size) {
  atomic_store(&malloc_context_size, size, memory_order_release);
}

// Read the current malloc stack-trace depth.
u32 GetMallocContextSize() {
  return atomic_load(&malloc_context_size, memory_order_acquire);
}

} // namespace __asan
// ------------------ Interface -------------- {{{1 // ------------------ Interface -------------- {{{1

View File

@ -9,6 +9,7 @@
// //
// ASan-private header for asan_stack.cc. // ASan-private header for asan_stack.cc.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifndef ASAN_STACK_H #ifndef ASAN_STACK_H
#define ASAN_STACK_H #define ASAN_STACK_H
@ -19,6 +20,11 @@
namespace __asan { namespace __asan {
// Default depth for malloc/free stack traces.
static const u32 kDefaultMallocContextSize = 30;

// Atomic setter/getter for the malloc stack-trace depth (defined out of line).
void SetMallocContextSize(u32 size);
u32 GetMallocContextSize();
// Get the stack trace with the given pc and bp. // Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace. // The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame. // The bp may refer to the current frame or to the caller's frame.
@ -41,15 +47,15 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
uptr stack_bottom = t->stack_bottom(); uptr stack_bottom = t->stack_bottom();
ScopedUnwinding unwind_scope(t); ScopedUnwinding unwind_scope(t);
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast); stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
} else if (t == 0 && !fast) { } else if (!t && !fast) {
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */ /* If GetCurrentThread() has failed, try to do slow unwind anyways. */
stack->Unwind(max_depth, pc, bp, context, 0, 0, false); stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
} }
} }
#endif // SANITIZER_WINDOWS #endif // SANITIZER_WINDOWS
} }
} // namespace __asan } // namespace __asan
// NOTE: A Rule of thumb is to retrieve stack trace in the interceptors // NOTE: A Rule of thumb is to retrieve stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally // as early as possible (in functions exposed to the user), as we generally
@ -76,9 +82,10 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
common_flags()->fast_unwind_on_fatal) common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \ #define GET_STACK_TRACE_SIGNAL(sig) \
BufferedStackTrace stack; \ BufferedStackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, \
(sig).pc, (sig).bp, (sig).context, \
common_flags()->fast_unwind_on_fatal) common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \ #define GET_STACK_TRACE_FATAL_HERE \
@ -90,9 +97,8 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
#define GET_STACK_TRACE_THREAD \ #define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true) GET_STACK_TRACE(kStackTraceMax, true)
#define GET_STACK_TRACE_MALLOC \ #define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(common_flags()->malloc_context_size, \ GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)
common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC #define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
@ -108,4 +114,4 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
stack.Print(); \ stack.Print(); \
} }
#endif // ASAN_STACK_H #endif // ASAN_STACK_H

View File

@ -49,12 +49,8 @@ void AsanStats::Print() {
(mmaped-munmaped)>>20, mmaped>>20, munmaped>>20, (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
mmaps, munmaps); mmaps, munmaps);
PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size);
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size); PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
PrintMallocStatsArray(" frees by size class: ", freed_by_size); Printf("Stats: malloc large: %zu\n", malloc_large);
PrintMallocStatsArray(" rfrees by size class: ", really_freed_by_size);
Printf("Stats: malloc large: %zu small slow: %zu\n",
malloc_large, malloc_small_slow);
} }
void AsanStats::MergeFrom(const AsanStats *stats) { void AsanStats::MergeFrom(const AsanStats *stats) {
@ -159,8 +155,7 @@ uptr __sanitizer_get_free_bytes() {
GetAccumulatedStats(&stats); GetAccumulatedStats(&stats);
uptr total_free = stats.mmaped uptr total_free = stats.mmaped
- stats.munmaped - stats.munmaped
+ stats.really_freed + stats.really_freed;
+ stats.really_freed_redzones;
uptr total_used = stats.malloced uptr total_used = stats.malloced
+ stats.malloced_redzones; + stats.malloced_redzones;
// Return sane value if total_free < total_used due to racy // Return sane value if total_free < total_used due to racy

View File

@ -30,20 +30,14 @@ struct AsanStats {
uptr freed; uptr freed;
uptr real_frees; uptr real_frees;
uptr really_freed; uptr really_freed;
uptr really_freed_redzones;
uptr reallocs; uptr reallocs;
uptr realloced; uptr realloced;
uptr mmaps; uptr mmaps;
uptr mmaped; uptr mmaped;
uptr munmaps; uptr munmaps;
uptr munmaped; uptr munmaped;
uptr mmaped_by_size[kNumberOfSizeClasses];
uptr malloced_by_size[kNumberOfSizeClasses];
uptr freed_by_size[kNumberOfSizeClasses];
uptr really_freed_by_size[kNumberOfSizeClasses];
uptr malloc_large; uptr malloc_large;
uptr malloc_small_slow; uptr malloced_by_size[kNumberOfSizeClasses];
// Ctor for global AsanStats (accumulated stats for dead threads). // Ctor for global AsanStats (accumulated stats for dead threads).
explicit AsanStats(LinkerInitialized) { } explicit AsanStats(LinkerInitialized) { }

View File

@ -0,0 +1,108 @@
//===-- asan_suppressions.cc ----------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Issue suppression and suppression-related functions.
//===----------------------------------------------------------------------===//
#include "asan_suppressions.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
// Storage for the singleton SuppressionContext. It is constructed lazily via
// placement new in InitializeSuppressions() and intentionally never destroyed
// (suppressions are queried until process exit).
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
// Suppression categories recognized by ASan (keys used in suppression files).
static const char kInterceptorName[] = "interceptor_name";
static const char kInterceptorViaFunction[] = "interceptor_via_fun";
static const char kInterceptorViaLibrary[] = "interceptor_via_lib";
static const char kODRViolation[] = "odr_violation";
// All categories, in the order passed to the SuppressionContext ctor.
static const char *kSuppressionTypes[] = {
    kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
    kODRViolation};
extern "C" {
#if SANITIZER_SUPPORTS_WEAK_HOOKS
// User-overridable hook that supplies additional default suppressions.
// Declared weak: when the user does not define it, the symbol resolves to
// null, which InitializeSuppressions() detects by taking its address.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__asan_default_suppressions();
#else
// No weak hooks available on this platform; provide an empty implementation.
const char *__asan_default_suppressions() { return ""; }
#endif  // SANITIZER_SUPPORTS_WEAK_HOOKS
}  // extern "C"
// Builds the global suppression context from flags()->suppressions (a file
// path, possibly empty) and from the __asan_default_suppressions() hook.
// Must be called exactly once, before any of the query functions below.
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  // Placement-new into a static buffer: no heap allocation, no global dtor.
  suppression_ctx = new (suppression_placeholder)  // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  // Taking the address probes the weak symbol; it is null when the user did
  // not define the hook (on platforms with weak-hook support).
  if (&__asan_default_suppressions)
    suppression_ctx->Parse(__asan_default_suppressions());
}
bool IsInterceptorSuppressed(const char *interceptor_name) {
CHECK(suppression_ctx);
Suppression *s;
// Match "interceptor_name" suppressions.
return suppression_ctx->Match(interceptor_name, kInterceptorName, &s);
}
// Returns true if any suppression category that requires a symbolized stack
// trace ("interceptor_via_fun" / "interceptor_via_lib") is present.
bool HaveStackTraceBasedSuppressions() {
  CHECK(suppression_ctx);
  const bool by_function =
      suppression_ctx->HasSuppressionType(kInterceptorViaFunction);
  const bool by_library =
      suppression_ctx->HasSuppressionType(kInterceptorViaLibrary);
  return by_function || by_library;
}
bool IsODRViolationSuppressed(const char *global_var_name) {
CHECK(suppression_ctx);
Suppression *s;
// Match "odr_violation" suppressions.
return suppression_ctx->Match(global_var_name, kODRViolation, &s);
}
// Returns true if any frame of |stack| matches an "interceptor_via_lib" or
// "interceptor_via_fun" suppression. This symbolizes PCs, so it is
// comparatively expensive; the cheap HaveStackTraceBasedSuppressions() gate
// below short-circuits the common no-suppressions case.
bool IsStackTraceSuppressed(const StackTrace *stack) {
  if (!HaveStackTraceBasedSuppressions())
    return false;

  CHECK(suppression_ctx);
  Symbolizer *symbolizer = Symbolizer::GetOrInit();
  Suppression *s;
  // Walk frames until the end of the trace or the first null PC.
  for (uptr i = 0; i < stack->size && stack->trace[i]; i++) {
    uptr addr = stack->trace[i];

    if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) {
      // Match "interceptor_via_lib" suppressions against the module name.
      if (const char *module_name = symbolizer->GetModuleNameForPc(addr))
        if (suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s))
          return true;
    }

    if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
      SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
      // One PC may symbolize to several (e.g. inlined) frames; check each.
      for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
        const char *function_name = cur->info.function;
        if (!function_name) {
          continue;
        }
        // Match "interceptor_via_fun" suppressions.
        if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
                                   &s)) {
          // Release the symbolized frame list before the early return.
          frames->ClearAll();
          return true;
        }
      }
      frames->ClearAll();
    }
  }
  return false;
}
} // namespace __asan

View File

@ -0,0 +1,28 @@
//===-- asan_suppressions.h -------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_suppressions.cc.
//===----------------------------------------------------------------------===//
#ifndef ASAN_SUPPRESSIONS_H
#define ASAN_SUPPRESSIONS_H

#include "asan_internal.h"
#include "sanitizer_common/sanitizer_stacktrace.h"

namespace __asan {

// One-time setup of the global suppression context; call before any query
// function below.
void InitializeSuppressions();
// True if |interceptor_name| matches an "interceptor_name" suppression.
bool IsInterceptorSuppressed(const char *interceptor_name);
// True if any stack-trace-based suppression categories
// ("interceptor_via_fun" / "interceptor_via_lib") are present.
bool HaveStackTraceBasedSuppressions();
// True if some frame of |stack| matches a stack-trace-based suppression.
// Symbolizes PCs, so comparatively expensive.
bool IsStackTraceSuppressed(const StackTrace *stack);
// True if |global_var_name| matches an "odr_violation" suppression.
bool IsODRViolationSuppressed(const char *global_var_name);

}  // namespace __asan

#endif  // ASAN_SUPPRESSIONS_H

View File

@ -25,6 +25,11 @@ namespace __asan {
// AsanThreadContext implementation. // AsanThreadContext implementation.
struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
};
void AsanThreadContext::OnCreated(void *arg) { void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg); CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack) if (args->stack)
@ -35,7 +40,7 @@ void AsanThreadContext::OnCreated(void *arg) {
void AsanThreadContext::OnFinished() { void AsanThreadContext::OnFinished() {
// Drop the link to the AsanThread object. // Drop the link to the AsanThread object.
thread = 0; thread = nullptr;
} }
// MIPS requires aligned address // MIPS requires aligned address
@ -73,13 +78,17 @@ AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
// AsanThread implementation. // AsanThread implementation.
AsanThread *AsanThread::Create(thread_callback_t start_routine, AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
void *arg) { u32 parent_tid, StackTrace *stack,
bool detached) {
uptr PageSize = GetPageSizeCached(); uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize); uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__); AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine; thread->start_routine_ = start_routine;
thread->arg_ = arg; thread->arg_ = arg;
CreateThreadContextArgs args = { thread, stack };
asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
parent_tid, &args);
return thread; return thread;
} }
@ -114,7 +123,7 @@ void AsanThread::Destroy() {
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() { FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
uptr stack_size = this->stack_size(); uptr stack_size = this->stack_size();
if (stack_size == 0) // stack_size is not yet available, don't use FakeStack. if (stack_size == 0) // stack_size is not yet available, don't use FakeStack.
return 0; return nullptr;
uptr old_val = 0; uptr old_val = 0;
// fake_stack_ has 3 states: // fake_stack_ has 3 states:
// 0 -- not initialized // 0 -- not initialized
@ -135,11 +144,11 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
SetTLSFakeStack(fake_stack_); SetTLSFakeStack(fake_stack_);
return fake_stack_; return fake_stack_;
} }
return 0; return nullptr;
} }
void AsanThread::Init() { void AsanThread::Init() {
fake_stack_ = 0; // Will be initialized lazily if needed. fake_stack_ = nullptr; // Will be initialized lazily if needed.
CHECK_EQ(this->stack_size(), 0U); CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls(); SetThreadStackAndTls();
CHECK_GT(this->stack_size(), 0U); CHECK_GT(this->stack_size(), 0U);
@ -150,12 +159,15 @@ void AsanThread::Init() {
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(), VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_, (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
&local); &local);
AsanPlatformThreadInit();
} }
thread_return_t AsanThread::ThreadStart(uptr os_id) { thread_return_t AsanThread::ThreadStart(
uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
Init(); Init();
asanThreadRegistry().StartThread(tid(), os_id, 0); asanThreadRegistry().StartThread(tid(), os_id, nullptr);
if (signal_thread_is_registered)
atomic_store(signal_thread_is_registered, 1, memory_order_release);
if (common_flags()->use_sigaltstack) SetAlternateSignalStack(); if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) { if (!start_routine_) {
@ -262,7 +274,7 @@ AsanThread *GetCurrentThread() {
return tctx->thread; return tctx->thread;
} }
} }
return 0; return nullptr;
} }
return context->thread; return context->thread;
} }
@ -287,7 +299,7 @@ AsanThread *FindThreadByStackAddress(uptr addr) {
AsanThreadContext *tctx = static_cast<AsanThreadContext *>( AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress, asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
(void *)addr)); (void *)addr));
return tctx ? tctx->thread : 0; return tctx ? tctx->thread : nullptr;
} }
void EnsureMainThreadIDIsCorrect() { void EnsureMainThreadIDIsCorrect() {
@ -300,10 +312,10 @@ void EnsureMainThreadIDIsCorrect() {
__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) { __asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>( __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id)); __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context) return 0; if (!context) return nullptr;
return context->thread; return context->thread;
} }
} // namespace __asan } // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1 // --- Implementation of LSan-specific functions --- {{{1
namespace __lsan { namespace __lsan {
@ -340,4 +352,4 @@ void UnlockThreadRegistry() {
void EnsureMainThreadIDIsCorrect() { void EnsureMainThreadIDIsCorrect() {
__asan::EnsureMainThreadIDIsCorrect(); __asan::EnsureMainThreadIDIsCorrect();
} }
} // namespace __lsan } // namespace __lsan

View File

@ -9,6 +9,7 @@
// //
// ASan-private header for asan_thread.cc. // ASan-private header for asan_thread.cc.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifndef ASAN_THREAD_H #ifndef ASAN_THREAD_H
#define ASAN_THREAD_H #define ASAN_THREAD_H
@ -32,19 +33,16 @@ class AsanThread;
class AsanThreadContext : public ThreadContextBase { class AsanThreadContext : public ThreadContextBase {
public: public:
explicit AsanThreadContext(int tid) explicit AsanThreadContext(int tid)
: ThreadContextBase(tid), : ThreadContextBase(tid), announced(false),
announced(false), destructor_iterations(GetPthreadDestructorIterations()), stack_id(0),
destructor_iterations(kPthreadDestructorIterations), thread(nullptr) {}
stack_id(0),
thread(0) {
}
bool announced; bool announced;
u8 destructor_iterations; u8 destructor_iterations;
u32 stack_id; u32 stack_id;
AsanThread *thread; AsanThread *thread;
void OnCreated(void *arg); void OnCreated(void *arg) override;
void OnFinished(); void OnFinished() override;
}; };
// AsanThreadContext objects are never freed, so we need many of them. // AsanThreadContext objects are never freed, so we need many of them.
@ -53,12 +51,14 @@ COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
// AsanThread are stored in TSD and destroyed when the thread dies. // AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread { class AsanThread {
public: public:
static AsanThread *Create(thread_callback_t start_routine, void *arg); static AsanThread *Create(thread_callback_t start_routine, void *arg,
u32 parent_tid, StackTrace *stack, bool detached);
static void TSDDtor(void *tsd); static void TSDDtor(void *tsd);
void Destroy(); void Destroy();
void Init(); // Should be called from the thread itself. void Init(); // Should be called from the thread itself.
thread_return_t ThreadStart(uptr os_id); thread_return_t ThreadStart(uptr os_id,
atomic_uintptr_t *signal_thread_is_registered);
uptr stack_top() { return stack_top_; } uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; } uptr stack_bottom() { return stack_bottom_; }
@ -83,8 +83,8 @@ class AsanThread {
void DeleteFakeStack(int tid) { void DeleteFakeStack(int tid) {
if (!fake_stack_) return; if (!fake_stack_) return;
FakeStack *t = fake_stack_; FakeStack *t = fake_stack_;
fake_stack_ = 0; fake_stack_ = nullptr;
SetTLSFakeStack(0); SetTLSFakeStack(nullptr);
t->Destroy(tid); t->Destroy(tid);
} }
@ -94,7 +94,7 @@ class AsanThread {
FakeStack *fake_stack() { FakeStack *fake_stack() {
if (!__asan_option_detect_stack_use_after_return) if (!__asan_option_detect_stack_use_after_return)
return 0; return nullptr;
if (!has_fake_stack()) if (!has_fake_stack())
return AsyncSignalSafeLazyInitFakeStack(); return AsyncSignalSafeLazyInitFakeStack();
return fake_stack_; return fake_stack_;
@ -164,11 +164,6 @@ class ScopedDeadlySignal {
AsanThread *thread; AsanThread *thread;
}; };
struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
};
// Returns a single instance of registry. // Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry(); ThreadRegistry &asanThreadRegistry();
@ -183,6 +178,6 @@ AsanThread *FindThreadByStackAddress(uptr addr);
// Used to handle fork(). // Used to handle fork().
void EnsureMainThreadIDIsCorrect(); void EnsureMainThreadIDIsCorrect();
} // namespace __asan } // namespace __asan
#endif // ASAN_THREAD_H #endif // ASAN_THREAD_H

View File

@ -14,27 +14,139 @@
#if SANITIZER_WINDOWS #if SANITIZER_WINDOWS
#include <windows.h> #include <windows.h>
#include <dbghelp.h>
#include <stdlib.h> #include <stdlib.h>
#include "asan_interceptors.h" #include "asan_interceptors.h"
#include "asan_internal.h" #include "asan_internal.h"
#include "asan_report.h" #include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h" #include "asan_thread.h"
#include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h" #include "sanitizer_common/sanitizer_mutex.h"
using namespace __asan; // NOLINT
extern "C" { extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
int __asan_should_detect_stack_use_after_return() { int __asan_should_detect_stack_use_after_return() {
__asan_init(); __asan_init();
return __asan_option_detect_stack_use_after_return; return __asan_option_detect_stack_use_after_return;
}
} }
// -------------------- A workaround for the absence of weak symbols ----- {{{
// We don't have a direct equivalent of weak symbols when using MSVC, but we can
// use the /alternatename directive to tell the linker to default a specific
// symbol to a specific value, which works nicely for allocator hooks and
// __asan_default_options().
void __sanitizer_default_malloc_hook(void *ptr, uptr size) { }
void __sanitizer_default_free_hook(void *ptr) { }
const char* __asan_default_default_options() { return ""; }
const char* __asan_default_default_suppressions() { return ""; }
void __asan_default_on_error() {}
#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT
#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT
#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT
// }}}
} // extern "C"
// ---------------------- Windows-specific interceptors ---------------- {{{
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
CHECK(REAL(RaiseException));
__asan_handle_no_return();
REAL(RaiseException)(a, b, c, d);
}
INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
CHECK(REAL(_except_handler3));
__asan_handle_no_return();
return REAL(_except_handler3)(a, b, c, d);
}
#if ASAN_DYNAMIC
// This handler is named differently in -MT and -MD CRTs.
#define _except_handler4 _except_handler4_common
#endif
INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
CHECK(REAL(_except_handler4));
__asan_handle_no_return();
return REAL(_except_handler4)(a, b, c, d);
}
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread*)arg;
SetCurrentThread(t);
return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
}
INTERCEPTOR_WINAPI(DWORD, CreateThread,
void* security, uptr stack_size,
DWORD (__stdcall *start_routine)(void*), void* arg,
DWORD thr_flags, void* tid) {
// Strict init-order checking is thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
// FIXME: The CreateThread interceptor is not the same as a pthread_create
// one. This is a bandaid fix for PR22025.
bool detached = false; // FIXME: how can we determine it on Windows?
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t =
AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
return REAL(CreateThread)(security, stack_size,
asan_thread_start, t, thr_flags, tid);
}
namespace {
BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED);
void EnsureWorkerThreadRegistered() {
// FIXME: GetCurrentThread relies on TSD, which might not play well with
// system thread pools. We might want to use something like reference
// counting to zero out GetCurrentThread() underlying storage when the last
// work item finishes? Or can we disable reclaiming of threads in the pool?
BlockingMutexLock l(&mu_for_thread_tracking);
if (__asan::GetCurrentThread())
return;
AsanThread *t = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr,
/* parent_tid */ -1, /* stack */ nullptr, /* detached */ true);
t->Init();
asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t);
}
} // namespace
INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) {
// NtWaitForWorkViaWorkerFactory is called from system worker pool threads to
// query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc.
// System worker pool threads are created at an arbitrary point in time and
// without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory
// instead and don't register a specific parent_tid/stack.
EnsureWorkerThreadRegistered();
return REAL(NtWaitForWorkViaWorkerFactory)(a, b);
}
// }}}
namespace __asan { namespace __asan {
// ---------------------- TSD ---------------- {{{1 void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
ASAN_INTERCEPT_FUNC(RaiseException);
ASAN_INTERCEPT_FUNC(_except_handler3);
ASAN_INTERCEPT_FUNC(_except_handler4);
// NtWaitForWorkViaWorkerFactory is always linked dynamically.
CHECK(::__interception::OverrideFunction(
"NtWaitForWorkViaWorkerFactory",
(uptr)WRAP(NtWaitForWorkViaWorkerFactory),
(uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
}
// ---------------------- TSD ---------------- {{{
static bool tsd_key_inited = false; static bool tsd_key_inited = false;
static __declspec(thread) void *fake_tsd = 0; static __declspec(thread) void *fake_tsd = 0;
@ -57,7 +169,13 @@ void AsanTSDSet(void *tsd) {
void PlatformTSDDtor(void *tsd) { void PlatformTSDDtor(void *tsd) {
AsanThread::TSDDtor(tsd); AsanThread::TSDDtor(tsd);
} }
// ---------------------- Various stuff ---------------- {{{1 // }}}
// ---------------------- Various stuff ---------------- {{{
void DisableReexec() {
// No need to re-exec on Windows.
}
void MaybeReexec() { void MaybeReexec() {
// No need to re-exec on Windows. // No need to re-exec on Windows.
} }
@ -73,15 +191,11 @@ void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {} void AsanCheckIncompatibleRT() {}
void AsanPlatformThreadInit() {
// Nothing here for now.
}
void ReadContextStack(void *context, uptr *stack, uptr *ssize) { void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
void AsanOnSIGSEGV(int, void *siginfo, void *context) { void AsanOnDeadlySignal(int, void *siginfo, void *context) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
@ -90,12 +204,6 @@ static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) { static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
EXCEPTION_RECORD *exception_record = info->ExceptionRecord; EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
CONTEXT *context = info->ContextRecord; CONTEXT *context = info->ContextRecord;
uptr pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
uptr bp = (uptr)context->Rbp, sp = (uptr)context->Rsp;
#else
uptr bp = (uptr)context->Ebp, sp = (uptr)context->Esp;
#endif
if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) { exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
@ -103,8 +211,8 @@ static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
(exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
? "access-violation" ? "access-violation"
: "in-page-error"; : "in-page-error";
uptr access_addr = exception_record->ExceptionInformation[1]; SignalContext sig = SignalContext::Create(exception_record, context);
ReportSIGSEGV(description, pc, sp, bp, context, access_addr); ReportDeadlySignal(description, sig);
} }
// FIXME: Handle EXCEPTION_STACK_OVERFLOW here. // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
@ -142,10 +250,10 @@ int __asan_set_seh_filter() {
// Put a pointer to __asan_set_seh_filter at the end of the global list // Put a pointer to __asan_set_seh_filter at the end of the global list
// of C initializers, after the default EH is set by the CRT. // of C initializers, after the default EH is set by the CRT.
#pragma section(".CRT$XIZ", long, read) // NOLINT #pragma section(".CRT$XIZ", long, read) // NOLINT
static __declspec(allocate(".CRT$XIZ")) __declspec(allocate(".CRT$XIZ"))
int (*__intercept_seh)() = __asan_set_seh_filter; int (*__intercept_seh)() = __asan_set_seh_filter;
#endif #endif
// }}}
} // namespace __asan } // namespace __asan
#endif // _WIN32 #endif // _WIN32

View File

@ -19,7 +19,7 @@
// simplifies the build procedure. // simplifies the build procedure.
#ifdef ASAN_DLL_THUNK #ifdef ASAN_DLL_THUNK
#include "asan_init_version.h" #include "asan_init_version.h"
#include "sanitizer_common/sanitizer_interception.h" #include "interception/interception.h"
// ---------- Function interception helper functions and macros ----------- {{{1 // ---------- Function interception helper functions and macros ----------- {{{1
extern "C" { extern "C" {
@ -28,8 +28,9 @@ void *__stdcall GetProcAddress(void *module, const char *proc_name);
void abort(); void abort();
} }
static void *getRealProcAddressOrDie(const char *name) { static uptr getRealProcAddressOrDie(const char *name) {
void *ret = GetProcAddress(GetModuleHandleA(0), name); uptr ret =
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
if (!ret) if (!ret)
abort(); abort();
return ret; return ret;
@ -60,13 +61,12 @@ struct FunctionInterceptor<0> {
}; };
#define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \ #define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \
template<> struct FunctionInterceptor<__LINE__> { \ template <> struct FunctionInterceptor<__LINE__> { \
static void Execute() { \ static void Execute() { \
void *wrapper = getRealProcAddressOrDie(main_function); \ uptr wrapper = getRealProcAddressOrDie(main_function); \
if (!__interception::OverrideFunction((uptr)dll_function, \ if (!__interception::OverrideFunction((uptr)dll_function, wrapper, 0)) \
(uptr)wrapper, 0)) \
abort(); \ abort(); \
FunctionInterceptor<__LINE__-1>::Execute(); \ FunctionInterceptor<__LINE__ - 1>::Execute(); \
} \ } \
}; };
@ -208,7 +208,7 @@ extern "C" {
// __asan_init is expected to be called by only one thread. // __asan_init is expected to be called by only one thread.
if (fn) return; if (fn) return;
fn = (fntype)getRealProcAddressOrDie(__asan_init_name); fn = (fntype)getRealProcAddressOrDie("__asan_init");
fn(); fn();
__asan_option_detect_stack_use_after_return = __asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0); (__asan_should_detect_stack_use_after_return() != 0);
@ -217,6 +217,10 @@ extern "C" {
} }
} }
extern "C" void __asan_version_mismatch_check() {
// Do nothing.
}
INTERFACE_FUNCTION(__asan_handle_no_return) INTERFACE_FUNCTION(__asan_handle_no_return)
INTERFACE_FUNCTION(__asan_report_store1) INTERFACE_FUNCTION(__asan_report_store1)
@ -292,7 +296,45 @@ INTERFACE_FUNCTION(__asan_stack_free_8)
INTERFACE_FUNCTION(__asan_stack_free_9) INTERFACE_FUNCTION(__asan_stack_free_9)
INTERFACE_FUNCTION(__asan_stack_free_10) INTERFACE_FUNCTION(__asan_stack_free_10)
// FIXME: we might want to have a sanitizer_win_dll_thunk?
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_cov)
INTERFACE_FUNCTION(__sanitizer_cov_dump)
INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init) INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
INTERFACE_FUNCTION(__sanitizer_get_heap_size)
INTERFACE_FUNCTION(__sanitizer_get_ownership)
INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
INTERFACE_FUNCTION(__sanitizer_reset_coverage)
INTERFACE_FUNCTION(__sanitizer_get_number_of_counters)
INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
// TODO(timurrrr): Add more interface functions on the as-needed basis. // TODO(timurrrr): Add more interface functions on the as-needed basis.
@ -342,11 +384,15 @@ INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT
INTERCEPT_LIBRARY_FUNCTION(strchr); INTERCEPT_LIBRARY_FUNCTION(strchr);
INTERCEPT_LIBRARY_FUNCTION(strcmp); INTERCEPT_LIBRARY_FUNCTION(strcmp);
INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT
INTERCEPT_LIBRARY_FUNCTION(strcspn);
INTERCEPT_LIBRARY_FUNCTION(strlen); INTERCEPT_LIBRARY_FUNCTION(strlen);
INTERCEPT_LIBRARY_FUNCTION(strncat); INTERCEPT_LIBRARY_FUNCTION(strncat);
INTERCEPT_LIBRARY_FUNCTION(strncmp); INTERCEPT_LIBRARY_FUNCTION(strncmp);
INTERCEPT_LIBRARY_FUNCTION(strncpy); INTERCEPT_LIBRARY_FUNCTION(strncpy);
INTERCEPT_LIBRARY_FUNCTION(strnlen); INTERCEPT_LIBRARY_FUNCTION(strnlen);
INTERCEPT_LIBRARY_FUNCTION(strpbrk);
INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
INTERCEPT_LIBRARY_FUNCTION(strtol); INTERCEPT_LIBRARY_FUNCTION(strtol);
INTERCEPT_LIBRARY_FUNCTION(wcslen); INTERCEPT_LIBRARY_FUNCTION(wcslen);

View File

@ -13,7 +13,8 @@
// //
// This includes: // This includes:
// - forwarding the detect_stack_use_after_return runtime option // - forwarding the detect_stack_use_after_return runtime option
// - installing a custom SEH handler // - working around deficiencies of the MD runtime
// - installing a custom SEH handler // - installing a custom SEH handler
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -21,10 +22,15 @@
// Using #ifdef rather than relying on Makefiles etc. // Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure. // simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK #ifdef ASAN_DYNAMIC_RUNTIME_THUNK
extern "C" { #include <windows.h>
__declspec(dllimport) int __asan_set_seh_filter();
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XIZ", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT
////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be // Define a copy of __asan_option_detect_stack_use_after_return that should be
// used when linking an MD runtime with a set of object files on Windows. // used when linking an MD runtime with a set of object files on Windows.
// //
@ -35,16 +41,55 @@ __declspec(dllimport) int __asan_should_detect_stack_use_after_return();
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows // with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
// just to work around this issue, let's clone the a variable that is // just to work around this issue, let's clone the a variable that is
// constant after initialization anyways. // constant after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return = int __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return(); __asan_should_detect_stack_use_after_return();
}
////////////////////////////////////////////////////////////////////////////////
// For some reason, the MD CRT doesn't call the C/C++ terminators during on DLL
// unload or on exit. ASan relies on LLVM global_dtors to call
// __asan_unregister_globals on these events, which unfortunately doesn't work
// with the MD runtime, see PR22545 for the details.
// To work around this, for each DLL we schedule a call to UnregisterGlobals
// using atexit() that calls a small subset of C terminators
// where LLVM global_dtors is placed. Fingers crossed, no other C terminators
// are there.
extern "C" void __cdecl _initterm(void *a, void *b);
namespace {
__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0;
__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0;
void UnregisterGlobals() {
_initterm(&before_global_dtors, &after_global_dtors);
}
int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}
// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++
// initializers (think ctors for globals), UnregisterGlobals gets executed after
// dtors for C++ globals.
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
} // namespace
////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization
// of each module (see also asan_win.cc).
extern "C" {
__declspec(dllimport) int __asan_set_seh_filter();
static int SetSEHFilter() { return __asan_set_seh_filter(); }
// Set the ASan-specific SEH handler at the end of CRT initialization of each
// module (see asan_win.cc for the details).
//
// Unfortunately, putting a pointer to __asan_set_seh_filter into // Unfortunately, putting a pointer to __asan_set_seh_filter into
// __asan_intercept_seh gets optimized out, so we have to use an extra function. // __asan_intercept_seh gets optimized out, so we have to use an extra function.
static int SetSEHFilter() { return __asan_set_seh_filter(); }
#pragma section(".CRT$XIZ", long, read) // NOLINT
__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter; __declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
} }
#endif // ASAN_DYNAMIC_RUNTIME_THUNK #endif // ASAN_DYNAMIC_RUNTIME_THUNK

View File

@ -3,4 +3,4 @@
# a separate file so that version updates don't involve re-running # a separate file so that version updates don't involve re-running
# automake. # automake.
# CURRENT:REVISION:AGE # CURRENT:REVISION:AGE
2:0:0 3:0:0

View File

@ -616,6 +616,8 @@ BACKTRACE_SUPPORTED
FORMAT_FILE FORMAT_FILE
SANITIZER_SUPPORTED_FALSE SANITIZER_SUPPORTED_FALSE
SANITIZER_SUPPORTED_TRUE SANITIZER_SUPPORTED_TRUE
USE_CXX_ABI_FLAG_FALSE
USE_CXX_ABI_FLAG_TRUE
USING_MAC_INTERPOSE_FALSE USING_MAC_INTERPOSE_FALSE
USING_MAC_INTERPOSE_TRUE USING_MAC_INTERPOSE_TRUE
link_liblsan link_liblsan
@ -12027,7 +12029,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF cat > conftest.$ac_ext <<_LT_EOF
#line 12030 "configure" #line 12032 "configure"
#include "confdefs.h" #include "confdefs.h"
#if HAVE_DLFCN_H #if HAVE_DLFCN_H
@ -12133,7 +12135,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF cat > conftest.$ac_ext <<_LT_EOF
#line 12136 "configure" #line 12138 "configure"
#include "confdefs.h" #include "confdefs.h"
#if HAVE_DLFCN_H #if HAVE_DLFCN_H
@ -15514,7 +15516,7 @@ done
# Common libraries that we need to link against for all sanitizer libs. # Common libraries that we need to link against for all sanitizer libs.
link_sanitizer_common='-lpthread -ldl -lm' link_sanitizer_common='-lrt -lpthread -ldl -lm'
# Set up the set of additional libraries that we need to link against for libasan. # Set up the set of additional libraries that we need to link against for libasan.
link_libasan=$link_sanitizer_common link_libasan=$link_sanitizer_common
@ -15532,58 +15534,9 @@ link_libubsan=$link_sanitizer_common
link_liblsan=$link_sanitizer_common link_liblsan=$link_sanitizer_common
# At least for glibc, clock_gettime is in librt. But don't pull that
# in if it still doesn't give us the function we want. This
# test is copied from libgomp.
if test $ac_cv_func_clock_gettime = no; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for clock_gettime in -lrt" >&5
$as_echo_n "checking for clock_gettime in -lrt... " >&6; }
if test "${ac_cv_lib_rt_clock_gettime+set}" = set; then :
$as_echo_n "(cached) " >&6
else
ac_check_lib_save_LIBS=$LIBS
LIBS="-lrt $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
/* Override any GCC internal prototype to avoid an error.
Use char because int might match the return type of a GCC
builtin and then its argument prototype would still apply. */
#ifdef __cplusplus
extern "C"
#endif
char clock_gettime ();
int
main ()
{
return clock_gettime ();
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
ac_cv_lib_rt_clock_gettime=yes
else
ac_cv_lib_rt_clock_gettime=no
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
LIBS=$ac_check_lib_save_LIBS
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_rt_clock_gettime" >&5
$as_echo "$ac_cv_lib_rt_clock_gettime" >&6; }
if test "x$ac_cv_lib_rt_clock_gettime" = x""yes; then :
link_libasan="-lrt $link_libasan"
link_libtsan="-lrt $link_libtsan"
# Other sanitizers do not override clock_* API
fi
fi
case "$host" in case "$host" in
*-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;; *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ; CXX_ABI_NEEDED=true ;;
*) MAC_INTERPOSE=false ;; *) MAC_INTERPOSE=false ; CXX_ABI_NEEDED=false ;;
esac esac
if $MAC_INTERPOSE; then if $MAC_INTERPOSE; then
USING_MAC_INTERPOSE_TRUE= USING_MAC_INTERPOSE_TRUE=
@ -15593,6 +15546,14 @@ else
USING_MAC_INTERPOSE_FALSE= USING_MAC_INTERPOSE_FALSE=
fi fi
if $CXX_ABI_NEEDED; then
USE_CXX_ABI_FLAG_TRUE=
USE_CXX_ABI_FLAG_FALSE='#'
else
USE_CXX_ABI_FLAG_TRUE='#'
USE_CXX_ABI_FLAG_FALSE=
fi
backtrace_supported=yes backtrace_supported=yes
@ -16550,6 +16511,10 @@ if test -z "${USING_MAC_INTERPOSE_TRUE}" && test -z "${USING_MAC_INTERPOSE_FALSE
as_fn_error "conditional \"USING_MAC_INTERPOSE\" was never defined. as_fn_error "conditional \"USING_MAC_INTERPOSE\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5 Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi fi
if test -z "${USE_CXX_ABI_FLAG_TRUE}" && test -z "${USE_CXX_ABI_FLAG_FALSE}"; then
as_fn_error "conditional \"USE_CXX_ABI_FLAG\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
if test -z "${SANITIZER_SUPPORTED_TRUE}" && test -z "${SANITIZER_SUPPORTED_FALSE}"; then if test -z "${SANITIZER_SUPPORTED_TRUE}" && test -z "${SANITIZER_SUPPORTED_FALSE}"; then
as_fn_error "conditional \"SANITIZER_SUPPORTED\" was never defined. as_fn_error "conditional \"SANITIZER_SUPPORTED\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5 Usually this means the macro was only invoked conditionally." "$LINENO" 5

View File

@ -96,7 +96,7 @@ AM_CONDITIONAL(LSAN_SUPPORTED, [test "x$LSAN_SUPPORTED" = "xyes"])
AC_CHECK_FUNCS(clock_getres clock_gettime clock_settime) AC_CHECK_FUNCS(clock_getres clock_gettime clock_settime)
# Common libraries that we need to link against for all sanitizer libs. # Common libraries that we need to link against for all sanitizer libs.
link_sanitizer_common='-lpthread -ldl -lm' link_sanitizer_common='-lrt -lpthread -ldl -lm'
# Set up the set of additional libraries that we need to link against for libasan. # Set up the set of additional libraries that we need to link against for libasan.
link_libasan=$link_sanitizer_common link_libasan=$link_sanitizer_common
@ -114,22 +114,12 @@ AC_SUBST(link_libubsan)
link_liblsan=$link_sanitizer_common link_liblsan=$link_sanitizer_common
AC_SUBST(link_liblsan) AC_SUBST(link_liblsan)
# At least for glibc, clock_gettime is in librt. But don't pull that
# in if it still doesn't give us the function we want. This
# test is copied from libgomp.
if test $ac_cv_func_clock_gettime = no; then
AC_CHECK_LIB(rt, clock_gettime,
[link_libasan="-lrt $link_libasan"
link_libtsan="-lrt $link_libtsan"
# Other sanitizers do not override clock_* API
])
fi
case "$host" in case "$host" in
*-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;; *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ; CXX_ABI_NEEDED=true ;;
*) MAC_INTERPOSE=false ;; *) MAC_INTERPOSE=false ; CXX_ABI_NEEDED=false ;;
esac esac
AM_CONDITIONAL(USING_MAC_INTERPOSE, $MAC_INTERPOSE) AM_CONDITIONAL(USING_MAC_INTERPOSE, $MAC_INTERPOSE)
AM_CONDITIONAL(USE_CXX_ABI_FLAG, $CXX_ABI_NEEDED)
backtrace_supported=yes backtrace_supported=yes

View File

@ -35,6 +35,9 @@ case "${target}" in
arm*-*-linux*) arm*-*-linux*)
;; ;;
aarch64*-*-linux*) aarch64*-*-linux*)
if test x$ac_cv_sizeof_void_p = x8; then
TSAN_SUPPORTED=yes
fi
;; ;;
x86_64-*-darwin[1]* | i?86-*-darwin[1]*) x86_64-*-darwin[1]* | i?86-*-darwin[1]*)
TSAN_SUPPORTED=no TSAN_SUPPORTED=no

View File

@ -108,12 +108,7 @@ extern "C" {
void __asan_report_error(void *pc, void *bp, void *sp, void __asan_report_error(void *pc, void *bp, void *sp,
void *addr, int is_write, size_t access_size); void *addr, int is_write, size_t access_size);
// Sets the exit code to use when reporting an error. // Deprecated. Call __sanitizer_set_death_callback instead.
// Returns the old value.
int __asan_set_error_exit_code(int exit_code);
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __asan_set_death_callback(void (*callback)(void)); void __asan_set_death_callback(void (*callback)(void));
void __asan_set_error_report_callback(void (*callback)(const char*)); void __asan_set_error_report_callback(void (*callback)(const char*));

View File

@ -60,15 +60,6 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x); void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x); void __sanitizer_unaligned_store64(void *p, uint64_t x);
// Initialize coverage.
void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
// Open <name>.sancov.packed in the coverage directory and return the file
// descriptor. Returns -1 on failure, or if coverage dumping is disabled.
// This is intended for use by sandboxing code.
intptr_t __sanitizer_maybe_open_cov_file(const char *name);
// Annotate the current state of a contiguous container, such as // Annotate the current state of a contiguous container, such as
// std::vector, std::string or similar. // std::vector, std::string or similar.
// A contiguous container is a container that keeps all of its elements // A contiguous container is a container that keeps all of its elements
@ -115,6 +106,20 @@ extern "C" {
// Print the stack trace leading to this call. Useful for debugging user code. // Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace(); void __sanitizer_print_stack_trace();
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __sanitizer_set_death_callback(void (*callback)(void));
// Interceptor hooks.
// Whenever a libc function interceptor is called it checks if the
// corresponding weak hook is defined, and it so -- calls it.
// The primary use case is data-flow-guided fuzzing, where the fuzzer needs
// to know what is being passed to libc functions, e.g. memcmp.
// FIXME: implement more hooks.
void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
const void *s2, size_t n);
void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
const char *s2, size_t n);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
#endif #endif

View File

@ -0,0 +1,61 @@
//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Public interface for sanitizer coverage.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COVERAG_INTERFACE_H
#define SANITIZER_COVERAG_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
// Initialize coverage.
void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
// Open <name>.sancov.packed in the coverage directory and return the file
// descriptor. Returns -1 on failure, or if coverage dumping is disabled.
// This is intended for use by sandboxing code.
intptr_t __sanitizer_maybe_open_cov_file(const char *name);
// Get the number of total unique covered entities (blocks, edges, calls).
// This can be useful for coverage-directed in-process fuzzers.
uintptr_t __sanitizer_get_total_unique_coverage();
// Reset the basic-block (edge) coverage to the initial state.
// Useful for in-process fuzzing to start collecting coverage from scratch.
// Experimental, will likely not work for multi-threaded process.
void __sanitizer_reset_coverage();
// Set *data to the array of covered PCs and return the size of that array.
// Some of the entries in *data will be zero.
uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
// The coverage instrumentation may optionally provide imprecise counters.
// Rather than exposing the counter values to the user we instead map
// the counters to a bitset.
// Every counter is associated with 8 bits in the bitset.
// We define 8 value ranges: 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+
// The i-th bit is set to 1 if the counter value is in the i-th range.
// This counter-based coverage implementation is *not* thread-safe.
// Returns the number of registered coverage counters.
uintptr_t __sanitizer_get_number_of_counters();
// Updates the counter 'bitset', clears the counters and returns the number of
// new bits in 'bitset'.
// If 'bitset' is nullptr, only clears the counters.
// Otherwise 'bitset' should be at least
// __sanitizer_get_number_of_counters bytes long and 8-aligned.
uintptr_t
__sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // SANITIZER_COVERAG_INTERFACE_H

View File

@ -83,6 +83,24 @@ size_t dfsan_get_label_count(void);
/// callback executes. Pass in NULL to remove any callback. /// callback executes. Pass in NULL to remove any callback.
void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback); void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
/// Writes the labels currently used by the program to the given file
/// descriptor. The lines of the output have the following format:
///
/// <label> <parent label 1> <parent label 2> <label description if any>
void dfsan_dump_labels(int fd);
/// Interceptor hooks.
/// Whenever a dfsan's custom function is called the corresponding
/// hook is called it non-zero. The hooks should be defined by the user.
/// The primary use case is taint-guided fuzzing, where the fuzzer
/// needs to see the parameters of the function and the labels.
/// FIXME: implement more hooks.
void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
size_t n, dfsan_label s1_label,
dfsan_label s2_label, dfsan_label n_label);
void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
size_t n, dfsan_label s1_label,
dfsan_label s2_label, dfsan_label n_label);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"

View File

@ -39,14 +39,25 @@ extern "C" {
void __lsan_register_root_region(const void *p, size_t size); void __lsan_register_root_region(const void *p, size_t size);
void __lsan_unregister_root_region(const void *p, size_t size); void __lsan_unregister_root_region(const void *p, size_t size);
// Calling this function makes LSan enter the leak checking phase immediately. // Check for leaks now. This function behaves identically to the default
// Use this if normal end-of-process leak checking happens too late (e.g. if // end-of-process leak check. In particular, it will terminate the process if
// you have intentional memory leaks in your shutdown code). Calling this // leaks are found and the exitcode runtime flag is non-zero.
// function overrides end-of-process leak checking; it must be called at // Subsequent calls to this function will have no effect and end-of-process
// most once per process. This function will terminate the process if there // leak check will not run. Effectively, end-of-process leak check is moved to
// are memory leaks and the exit_code flag is non-zero. // the time of first invocation of this function.
// By calling this function early during process shutdown, you can instruct
// LSan to ignore shutdown-only leaks which happen later on.
void __lsan_do_leak_check(); void __lsan_do_leak_check();
// Check for leaks now. Returns zero if no leaks have been found or if leak
// detection is disabled, non-zero otherwise.
// This function may be called repeatedly, e.g. to periodically check a
// long-running process. It prints a leak report if appropriate, but does not
// terminate the process. It does not affect the behavior of
// __lsan_do_leak_check() or the end-of-process leak check, and is not
// affected by them.
int __lsan_do_recoverable_leak_check();
// The user may optionally provide this function to disallow leak checking // The user may optionally provide this function to disallow leak checking
// for the program it is linked into (if the return value is non-zero). This // for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond // function must be defined as returning a constant value; any behavior beyond

View File

@ -23,6 +23,11 @@ extern "C" {
/* Get raw origin for an address. */ /* Get raw origin for an address. */
uint32_t __msan_get_origin(const volatile void *a); uint32_t __msan_get_origin(const volatile void *a);
/* Test that this_id is a descendant of prev_id (or they are simply equal).
* "descendant" here means they are part of the same chain, created with
* __msan_chain_origin. */
int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
/* Returns non-zero if tracking origins. */ /* Returns non-zero if tracking origins. */
int __msan_get_track_origins(); int __msan_get_track_origins();
@ -36,7 +41,9 @@ extern "C" {
contents). */ contents). */
void __msan_unpoison_string(const volatile char *a); void __msan_unpoison_string(const volatile char *a);
/* Make memory region fully uninitialized (without changing its contents). */ /* Make memory region fully uninitialized (without changing its contents).
This is a legacy interface that does not update origin information. Use
__msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size); void __msan_poison(const volatile void *a, size_t size);
/* Make memory region partially uninitialized (without changing its contents). /* Make memory region partially uninitialized (without changing its contents).
@ -52,10 +59,6 @@ extern "C" {
* is not. */ * is not. */
void __msan_check_mem_is_initialized(const volatile void *x, size_t size); void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
/* Set exit code when error(s) were detected.
Value of 0 means don't change the program exit code. */
void __msan_set_exit_code(int exit_code);
/* For testing: /* For testing:
__msan_set_expect_umr(1); __msan_set_expect_umr(1);
... some buggy code ... ... some buggy code ...
@ -83,14 +86,22 @@ extern "C" {
Memory will be marked uninitialized, with origin at the call site. */ Memory will be marked uninitialized, with origin at the call site. */
void __msan_allocated_memory(const volatile void* data, size_t size); void __msan_allocated_memory(const volatile void* data, size_t size);
/* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
void __sanitizer_dtor_callback(const volatile void* data, size_t size);
/* This function may be optionally provided by user and should return /* This function may be optionally provided by user and should return
a string containing Msan runtime options. See msan_flags.h for details. */ a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options(); const char* __msan_default_options();
/* Sets the callback to be called right before death on error. /* Deprecated. Call __sanitizer_set_death_callback instead. */
Passing 0 will unset the callback. */
void __msan_set_death_callback(void (*callback)(void)); void __msan_set_death_callback(void (*callback)(void));
/* Update shadow for the application copy of size bytes from src to dst.
Src and dst are application addresses. This function does not copy the
actual application memory, it only updates shadow and origin for such
copy. Source and destination regions can overlap. */
void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
size_t size);
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
#endif #endif

View File

@ -217,7 +217,6 @@ const interpose_substitution substitution_##func_name[] \
namespace __interception { \ namespace __interception { \
FUNC_TYPE(func) PTR_TO_REAL(func); \ FUNC_TYPE(func) PTR_TO_REAL(func); \
} \ } \
DECLARE_WRAPPER_WINAPI(ret_type, func, __VA_ARGS__) \
extern "C" \ extern "C" \
INTERCEPTOR_ATTRIBUTE \ INTERCEPTOR_ATTRIBUTE \
ret_type __stdcall WRAP(func)(__VA_ARGS__) ret_type __stdcall WRAP(func)(__VA_ARGS__)

View File

@ -33,12 +33,12 @@ void *GetFuncAddrVer(const char *func_name, const char *ver);
(::__interception::uptr) & WRAP(func)) (::__interception::uptr) & WRAP(func))
#if !defined(__ANDROID__) // android does not have dlvsym #if !defined(__ANDROID__) // android does not have dlvsym
# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ #define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::real_##func = (func##_f)(unsigned long) \ (::__interception::real_##func = (func##_f)( \
::__interception::GetFuncAddrVer(#func, symver) unsigned long)::__interception::GetFuncAddrVer(#func, symver))
#else #else
# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ #define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif // !defined(__ANDROID__) #endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H #endif // INTERCEPTION_LINUX_H

View File

@ -82,6 +82,7 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) {
cursor += 2; cursor += 2;
continue; continue;
case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX
case '\xB8': // B8 XX YY ZZ WW = mov eax, WWZZYYXX
cursor += 5; cursor += 5;
continue; continue;
} }
@ -179,11 +180,15 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
return true; return true;
} }
static const void **InterestingDLLsAvailable() { static void **InterestingDLLsAvailable() {
const char *InterestingDLLs[] = {"kernel32.dll", const char *InterestingDLLs[] = {
"msvcr110.dll", // VS2012 "kernel32.dll",
"msvcr120.dll", // VS2013 "msvcr110.dll", // VS2012
NULL}; "msvcr120.dll", // VS2013
// NTDLL should go last as it exports some functions that we should override
// in the CRT [presumably only used internally].
"ntdll.dll", NULL
};
static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 }; static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
if (!result[0]) { if (!result[0]) {
for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) { for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
@ -191,14 +196,65 @@ static const void **InterestingDLLsAvailable() {
result[j++] = (void *)h; result[j++] = (void *)h;
} }
} }
return (const void **)&result[0]; return &result[0];
}
namespace {
// Utility for reading loaded PE images.
template <typename T> class RVAPtr {
public:
RVAPtr(void *module, uptr rva)
: ptr_(reinterpret_cast<T *>(reinterpret_cast<char *>(module) + rva)) {}
operator T *() { return ptr_; }
T *operator->() { return ptr_; }
T *operator++() { return ++ptr_; }
private:
T *ptr_;
};
} // namespace
// Internal implementation of GetProcAddress. At least since Windows 8,
// GetProcAddress appears to initialize DLLs before returning function pointers
// into them. This is problematic for the sanitizers, because they typically
// want to intercept malloc *before* MSVCRT initializes. Our internal
// implementation walks the export list manually without doing initialization.
uptr InternalGetProcAddress(void *module, const char *func_name) {
// Check that the module header is full and present.
RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
headers->Signature != IMAGE_NT_SIGNATURE || // "PE\0\0"
headers->FileHeader.SizeOfOptionalHeader <
sizeof(IMAGE_OPTIONAL_HEADER)) {
return 0;
}
IMAGE_DATA_DIRECTORY *export_directory =
&headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,
export_directory->VirtualAddress);
RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);
RVAPtr<DWORD> names(module, exports->AddressOfNames);
RVAPtr<WORD> ordinals(module, exports->AddressOfNameOrdinals);
for (DWORD i = 0; i < exports->NumberOfNames; i++) {
RVAPtr<char> name(module, names[i]);
if (!strcmp(func_name, name)) {
DWORD index = ordinals[i];
RVAPtr<char> func(module, functions[index]);
return (uptr)(char *)func;
}
}
return 0;
} }
static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) { static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) {
*func_addr = 0; *func_addr = 0;
const void **DLLs = InterestingDLLsAvailable(); void **DLLs = InterestingDLLsAvailable();
for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i) for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i)
*func_addr = (uptr)GetProcAddress((HMODULE)DLLs[i], func_name); *func_addr = InternalGetProcAddress(DLLs[i], func_name);
return (*func_addr != 0); return (*func_addr != 0);
} }

View File

@ -28,6 +28,10 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0);
// Overrides a function in a system DLL or DLL CRT by its exported name. // Overrides a function in a system DLL or DLL CRT by its exported name.
bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0); bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
uptr InternalGetProcAddress(void *module, const char *func_name);
} // namespace __interception } // namespace __interception
#if defined(INTERCEPTION_DYNAMIC_CRT) #if defined(INTERCEPTION_DYNAMIC_CRT)

View File

@ -13,6 +13,7 @@
#include "lsan.h" #include "lsan.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_allocator.h" #include "lsan_allocator.h"
#include "lsan_common.h" #include "lsan_common.h"
@ -32,13 +33,44 @@ bool WordIsPoisoned(uptr addr) {
using namespace __lsan; // NOLINT using namespace __lsan; // NOLINT
static void InitializeFlags() {
// Set all the default values.
SetCommonFlagsDefaults();
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf.malloc_context_size = 30;
cf.detect_leaks = true;
cf.exitcode = 23;
OverrideCommonFlags(cf);
}
Flags *f = flags();
f->SetDefaults();
FlagParser parser;
RegisterLsanFlags(&parser, f);
RegisterCommonFlags(&parser);
parser.ParseString(GetEnv("LSAN_OPTIONS"));
SetVerbosity(common_flags()->verbosity);
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
}
extern "C" void __lsan_init() { extern "C" void __lsan_init() {
CHECK(!lsan_init_is_running); CHECK(!lsan_init_is_running);
if (lsan_inited) if (lsan_inited)
return; return;
lsan_init_is_running = true; lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer"; SanitizerToolName = "LeakSanitizer";
InitCommonLsan(true); CacheBinaryName();
InitializeFlags();
InitCommonLsan();
InitializeAllocator(); InitializeAllocator();
InitTlsSize(); InitTlsSize();
InitializeInterceptors(); InitializeInterceptors();
@ -50,6 +82,9 @@ extern "C" void __lsan_init() {
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck); Atexit(DoLeakCheck);
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
lsan_inited = true; lsan_inited = true;
lsan_init_is_running = false; lsan_init_is_running = false;
} }

View File

@ -23,19 +23,29 @@ extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan { namespace __lsan {
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
struct ChunkMetadata { struct ChunkMetadata {
bool allocated : 8; // Must be first. u8 allocated : 8; // Must be first.
ChunkTag tag : 2; ChunkTag tag : 2;
uptr requested_size : 54; uptr requested_size : 54;
u32 stack_trace_id; u32 stack_trace_id;
}; };
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator; sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator; typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@ -45,7 +55,7 @@ static Allocator allocator;
static THREADLOCAL AllocatorCache cache; static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() { void InitializeAllocator() {
allocator.Init(); allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
} }
void AllocatorThreadFinish() { void AllocatorThreadFinish() {
@ -79,7 +89,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
size = 1; size = 1;
if (size > kMaxAllowedMallocSize) { if (size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size); Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return 0; return nullptr;
} }
void *p = allocator.Allocate(&cache, size, alignment, false); void *p = allocator.Allocate(&cache, size, alignment, false);
// Do not rely on the allocator to clear the memory (it's slow). // Do not rely on the allocator to clear the memory (it's slow).
@ -102,7 +112,7 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
if (new_size > kMaxAllowedMallocSize) { if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size); Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
allocator.Deallocate(&cache, p); allocator.Deallocate(&cache, p);
return 0; return nullptr;
} }
p = allocator.Reallocate(&cache, p, new_size, alignment); p = allocator.Reallocate(&cache, p, new_size, alignment);
RegisterAllocation(stack, p, new_size); RegisterAllocation(stack, p, new_size);
@ -200,7 +210,7 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
return kIgnoreObjectInvalid; return kIgnoreObjectInvalid;
} }
} }
} // namespace __lsan } // namespace __lsan
using namespace __lsan; using namespace __lsan;
@ -229,10 +239,10 @@ SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != 0; } int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) { uptr __sanitizer_get_allocated_size(const void *p) {
return GetMallocUsableSize(p); return GetMallocUsableSize(p);
} }
} // extern "C" } // extern "C"

View File

@ -14,11 +14,11 @@
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h" #include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_report_decorator.h"
@ -34,52 +34,17 @@ bool DisabledInThisThread() { return disable_counter > 0; }
Flags lsan_flags; Flags lsan_flags;
static void InitializeFlags(bool standalone) { void Flags::SetDefaults() {
Flags *f = flags(); #define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
// Default values. #include "lsan_flags.inc"
f->report_objects = false; #undef LSAN_FLAG
f->resolution = 0; }
f->max_leaks = 0;
f->exitcode = 23;
f->use_registers = true;
f->use_globals = true;
f->use_stacks = true;
f->use_tls = true;
f->use_root_regions = true;
f->use_unaligned = false;
f->use_poisoned = false;
f->log_pointers = false;
f->log_threads = false;
const char *options = GetEnv("LSAN_OPTIONS"); void RegisterLsanFlags(FlagParser *parser, Flags *f) {
if (options) { #define LSAN_FLAG(Type, Name, DefaultValue, Description) \
ParseFlag(options, &f->use_registers, "use_registers", ""); RegisterFlag(parser, #Name, Description, &f->Name);
ParseFlag(options, &f->use_globals, "use_globals", ""); #include "lsan_flags.inc"
ParseFlag(options, &f->use_stacks, "use_stacks", ""); #undef LSAN_FLAG
ParseFlag(options, &f->use_tls, "use_tls", "");
ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
ParseFlag(options, &f->report_objects, "report_objects", "");
ParseFlag(options, &f->resolution, "resolution", "");
CHECK_GE(&f->resolution, 0);
ParseFlag(options, &f->max_leaks, "max_leaks", "");
CHECK_GE(&f->max_leaks, 0);
ParseFlag(options, &f->log_pointers, "log_pointers", "");
ParseFlag(options, &f->log_threads, "log_threads", "");
ParseFlag(options, &f->exitcode, "exitcode", "");
}
// Set defaults for common flags (only in standalone mode) and parse
// them from LSAN_OPTIONS.
CommonFlags *cf = common_flags();
if (standalone) {
SetCommonFlagsDefaults(cf);
cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf->malloc_context_size = 30;
cf->detect_leaks = true;
}
ParseCommonFlagsFromString(cf, options);
} }
#define LOG_POINTERS(...) \ #define LOG_POINTERS(...) \
@ -92,14 +57,23 @@ static void InitializeFlags(bool standalone) {
if (flags()->log_threads) Report(__VA_ARGS__); \ if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0); } while (0);
static bool suppressions_inited = false; ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
void InitializeSuppressions() { void InitializeSuppressions() {
CHECK(!suppressions_inited); CHECK_EQ(nullptr, suppression_ctx);
SuppressionContext::InitIfNecessary(); suppression_ctx = new (suppression_placeholder) // NOLINT
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__lsan_default_suppressions) if (&__lsan_default_suppressions)
SuppressionContext::Get()->Parse(__lsan_default_suppressions()); suppression_ctx->Parse(__lsan_default_suppressions());
suppressions_inited = true; }
static SuppressionContext *GetSuppressionContext() {
CHECK(suppression_ctx);
return suppression_ctx;
} }
struct RootRegion { struct RootRegion {
@ -115,8 +89,7 @@ void InitializeRootRegions() {
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1); root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
} }
void InitCommonLsan(bool standalone) { void InitCommonLsan() {
InitializeFlags(standalone);
InitializeRootRegions(); InitializeRootRegions();
if (common_flags()->detect_leaks) { if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if // Initialization which can fail or print warnings should only be done if
@ -139,9 +112,11 @@ static inline bool CanBeAHeapPointer(uptr p) {
// bound on heap addresses. // bound on heap addresses.
const uptr kMinAddress = 4 * 4096; const uptr kMinAddress = 4 * 4096;
if (p < kMinAddress) return false; if (p < kMinAddress) return false;
#ifdef __x86_64__ #if defined(__x86_64__)
// Accept only canonical form user-space addresses. // Accept only canonical form user-space addresses.
return ((p >> 47) == 0); return ((p >> 47) == 0);
#elif defined(__mips64)
return ((p >> 40) == 0);
#else #else
return true; return true;
#endif #endif
@ -149,13 +124,14 @@ static inline bool CanBeAHeapPointer(uptr p) {
// Scans the memory range, looking for byte patterns that point into allocator // Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|. // chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored // There are two usage modes for this function: finding reachable chunks
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks // (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0. // so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier, Frontier *frontier,
const char *region_type, ChunkTag tag) { const char *region_type, ChunkTag tag) {
CHECK(tag == kReachable || tag == kIndirectlyLeaked);
const uptr alignment = flags()->pointer_alignment(); const uptr alignment = flags()->pointer_alignment();
LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end); LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
uptr pp = begin; uptr pp = begin;
@ -169,9 +145,7 @@ void ScanRangeForPointers(uptr begin, uptr end,
// Pointers to self don't count. This matters when tag == kIndirectlyLeaked. // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
if (chunk == begin) continue; if (chunk == begin) continue;
LsanMetadata m(chunk); LsanMetadata m(chunk);
// Reachable beats ignored beats leaked. if (m.tag() == kReachable || m.tag() == kIgnored) continue;
if (m.tag() == kReachable) continue;
if (m.tag() == kIgnored && tag != kReachable) continue;
// Do this check relatively late so we can log only the interesting cases. // Do this check relatively late so we can log only the interesting cases.
if (!flags()->use_poisoned && WordIsPoisoned(pp)) { if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
@ -267,8 +241,8 @@ static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
MemoryMappingLayout proc_maps(/*cache_enabled*/true); MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr begin, end, prot; uptr begin, end, prot;
while (proc_maps.Next(&begin, &end, while (proc_maps.Next(&begin, &end,
/*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0, /*offset*/ nullptr, /*filename*/ nullptr,
&prot)) { /*filename_size*/ 0, &prot)) {
uptr intersection_begin = Max(root_begin, begin); uptr intersection_begin = Max(root_begin, begin);
uptr intersection_end = Min(end, root_end); uptr intersection_end = Min(end, root_end);
if (intersection_begin >= intersection_end) continue; if (intersection_begin >= intersection_end) continue;
@ -310,7 +284,7 @@ static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
LsanMetadata m(chunk); LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) { if (m.allocated() && m.tag() != kReachable) {
ScanRangeForPointers(chunk, chunk + m.requested_size(), ScanRangeForPointers(chunk, chunk + m.requested_size(),
/* frontier */ 0, "HEAP", kIndirectlyLeaked); /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
} }
} }
@ -320,8 +294,11 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
CHECK(arg); CHECK(arg);
chunk = GetUserBegin(chunk); chunk = GetUserBegin(chunk);
LsanMetadata m(chunk); LsanMetadata m(chunk);
if (m.allocated() && m.tag() == kIgnored) if (m.allocated() && m.tag() == kIgnored) {
LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
chunk, chunk + m.requested_size(), m.requested_size());
reinterpret_cast<Frontier *>(arg)->push_back(chunk); reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}
} }
// Sets the appropriate tag on each chunk. // Sets the appropriate tag on each chunk.
@ -329,26 +306,33 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier. // Holds the flood fill frontier.
Frontier frontier(1); Frontier frontier(1);
ForEachChunk(CollectIgnoredCb, &frontier);
ProcessGlobalRegions(&frontier); ProcessGlobalRegions(&frontier);
ProcessThreads(suspended_threads, &frontier); ProcessThreads(suspended_threads, &frontier);
ProcessRootRegions(&frontier); ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable); FloodFillTag(&frontier, kReachable);
// The check here is relatively expensive, so we do this in a separate flood // The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable // fill. That way we can skip the check for chunks that are reachable
// otherwise. // otherwise.
LOG_POINTERS("Processing platform-specific allocations.\n"); LOG_POINTERS("Processing platform-specific allocations.\n");
CHECK_EQ(0, frontier.size());
ProcessPlatformSpecificAllocations(&frontier); ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable); FloodFillTag(&frontier, kReachable);
LOG_POINTERS("Scanning ignored chunks.\n");
CHECK_EQ(0, frontier.size());
ForEachChunk(CollectIgnoredCb, &frontier);
FloodFillTag(&frontier, kIgnored);
// Iterate over leaked chunks and mark those that are reachable from other // Iterate over leaked chunks and mark those that are reachable from other
// leaked chunks. // leaked chunks.
LOG_POINTERS("Scanning leaked chunks.\n"); LOG_POINTERS("Scanning leaked chunks.\n");
ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */); ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
(void)arg;
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kIgnored)
m.set_tag(kDirectlyLeaked);
} }
static void PrintStackTraceById(u32 stack_trace_id) { static void PrintStackTraceById(u32 stack_trace_id) {
@ -365,7 +349,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
LsanMetadata m(chunk); LsanMetadata m(chunk);
if (!m.allocated()) return; if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
uptr resolution = flags()->resolution; u32 resolution = flags()->resolution;
u32 stack_trace_id = 0; u32 stack_trace_id = 0;
if (resolution > 0) { if (resolution > 0) {
StackTrace stack = StackDepotGet(m.stack_trace_id()); StackTrace stack = StackDepotGet(m.stack_trace_id());
@ -381,7 +365,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
static void PrintMatchedSuppressions() { static void PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched(1); InternalMmapVector<Suppression *> matched(1);
SuppressionContext::Get()->GetMatched(&matched); GetSuppressionContext()->GetMatched(&matched);
if (!matched.size()) if (!matched.size())
return; return;
const char *line = "-----------------------------------------------------"; const char *line = "-----------------------------------------------------";
@ -389,40 +373,38 @@ static void PrintMatchedSuppressions() {
Printf("Suppressions used:\n"); Printf("Suppressions used:\n");
Printf(" count bytes template\n"); Printf(" count bytes template\n");
for (uptr i = 0; i < matched.size(); i++) for (uptr i = 0; i < matched.size(); i++)
Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count), Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
matched[i]->weight, matched[i]->templ); &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
Printf("%s\n\n", line); Printf("%s\n\n", line);
} }
struct DoLeakCheckParam { struct CheckForLeaksParam {
bool success; bool success;
LeakReport leak_report; LeakReport leak_report;
}; };
static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads, static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
void *arg) { void *arg) {
DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg); CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
CHECK(param); CHECK(param);
CHECK(!param->success); CHECK(!param->success);
ClassifyAllChunks(suspended_threads); ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report); ForEachChunk(CollectLeaksCb, &param->leak_report);
// Clean up for subsequent leak checks. This assumes we did not overwrite any
// kIgnored tags.
ForEachChunk(ResetTagsCb, nullptr);
param->success = true; param->success = true;
} }
void DoLeakCheck() { static bool CheckForLeaks() {
EnsureMainThreadIDIsCorrect();
BlockingMutexLock l(&global_mutex);
static bool already_done;
if (already_done) return;
already_done = true;
if (&__lsan_is_turned_off && __lsan_is_turned_off()) if (&__lsan_is_turned_off && __lsan_is_turned_off())
return; return false;
EnsureMainThreadIDIsCorrect();
DoLeakCheckParam param; CheckForLeaksParam param;
param.success = false; param.success = false;
LockThreadRegistry(); LockThreadRegistry();
LockAllocator(); LockAllocator();
StopTheWorld(DoLeakCheckCallback, &param); DoStopTheWorld(CheckForLeaksCallback, &param);
UnlockAllocator(); UnlockAllocator();
UnlockThreadRegistry(); UnlockThreadRegistry();
@ -446,39 +428,51 @@ void DoLeakCheck() {
PrintMatchedSuppressions(); PrintMatchedSuppressions();
if (unsuppressed_count > 0) { if (unsuppressed_count > 0) {
param.leak_report.PrintSummary(); param.leak_report.PrintSummary();
if (flags()->exitcode) { return true;
if (common_flags()->coverage) }
__sanitizer_cov_dump(); return false;
internal__exit(flags()->exitcode); }
}
void DoLeakCheck() {
BlockingMutexLock l(&global_mutex);
static bool already_done;
if (already_done) return;
already_done = true;
bool have_leaks = CheckForLeaks();
if (!have_leaks) {
return;
}
if (common_flags()->exitcode) {
Die();
} }
} }
static int DoRecoverableLeakCheck() {
BlockingMutexLock l(&global_mutex);
bool have_leaks = CheckForLeaks();
return have_leaks ? 1 : 0;
}
static Suppression *GetSuppressionForAddr(uptr addr) { static Suppression *GetSuppressionForAddr(uptr addr) {
Suppression *s; Suppression *s = nullptr;
// Suppress by module name. // Suppress by module name.
const char *module_name; SuppressionContext *suppressions = GetSuppressionContext();
uptr module_offset; if (const char *module_name =
if (Symbolizer::GetOrInit() Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) && if (suppressions->Match(module_name, kSuppressionLeak, &s))
SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s)) return s;
return s;
// Suppress by file or function name. // Suppress by file or function name.
static const uptr kMaxAddrFrames = 16; SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo(); if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC( suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
addr, addr_frames.data(), kMaxAddrFrames); break;
for (uptr i = 0; i < addr_frames_num; i++) { }
if (SuppressionContext::Get()->Match(addr_frames[i].function,
SuppressionLeak, &s) ||
SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak,
&s))
return s;
} }
return 0; frames->ClearAll();
return s;
} }
static Suppression *GetSuppressionForStack(u32 stack_trace_id) { static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
@ -488,7 +482,7 @@ static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
StackTrace::GetPreviousInstructionPc(stack.trace[i])); StackTrace::GetPreviousInstructionPc(stack.trace[i]));
if (s) return s; if (s) return s;
} }
return 0; return nullptr;
} }
///// LeakReport implementation. ///// ///// LeakReport implementation. /////
@ -591,10 +585,9 @@ void LeakReport::PrintSummary() {
bytes += leaks_[i].total_size; bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count; allocations += leaks_[i].hit_count;
} }
InternalScopedBuffer<char> summary(kMaxSummaryLength); InternalScopedString summary(kMaxSummaryLength);
internal_snprintf(summary.data(), summary.size(), summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
"%zu byte(s) leaked in %zu allocation(s).", bytes, allocations);
allocations);
ReportErrorSummary(summary.data()); ReportErrorSummary(summary.data());
} }
@ -603,7 +596,8 @@ void LeakReport::ApplySuppressions() {
Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) { if (s) {
s->weight += leaks_[i].total_size; s->weight += leaks_[i].total_size;
s->hit_count += leaks_[i].hit_count; atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
leaks_[i].hit_count);
leaks_[i].is_suppressed = true; leaks_[i].is_suppressed = true;
} }
} }
@ -616,8 +610,8 @@ uptr LeakReport::UnsuppressedLeakCount() {
return result; return result;
} }
} // namespace __lsan } // namespace __lsan
#endif // CAN_SANITIZE_LEAKS #endif // CAN_SANITIZE_LEAKS
using namespace __lsan; // NOLINT using namespace __lsan; // NOLINT
@ -638,7 +632,7 @@ void __lsan_ignore_object(const void *p) {
"heap object at %p is already being ignored\n", p); "heap object at %p is already being ignored\n", p);
if (res == kIgnoreObjectSuccess) if (res == kIgnoreObjectSuccess)
VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p); VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS #endif // CAN_SANITIZE_LEAKS
} }
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
@ -649,7 +643,7 @@ void __lsan_register_root_region(const void *begin, uptr size) {
RootRegion region = {begin, size}; RootRegion region = {begin, size};
root_regions->push_back(region); root_regions->push_back(region);
VReport(1, "Registered root region at %p of size %llu\n", begin, size); VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS #endif // CAN_SANITIZE_LEAKS
} }
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
@ -676,7 +670,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
begin, size); begin, size);
Die(); Die();
} }
#endif // CAN_SANITIZE_LEAKS #endif // CAN_SANITIZE_LEAKS
} }
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
@ -702,7 +696,16 @@ void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS #if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks) if (common_flags()->detect_leaks)
__lsan::DoLeakCheck(); __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS #endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks)
return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
return 0;
} }
#if !SANITIZER_SUPPORTS_WEAK_HOOKS #if !SANITIZER_SUPPORTS_WEAK_HOOKS
@ -711,4 +714,4 @@ int __lsan_is_turned_off() {
return 0; return 0;
} }
#endif #endif
} // extern "C" } // extern "C"

View File

@ -17,14 +17,20 @@
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h" #include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h" #include "sanitizer_common/sanitizer_symbolizer.h"
#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64) #if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips64)) \
&& (SANITIZER_WORDSIZE == 64)
#define CAN_SANITIZE_LEAKS 1 #define CAN_SANITIZE_LEAKS 1
#else #else
#define CAN_SANITIZE_LEAKS 0 #define CAN_SANITIZE_LEAKS 0
#endif #endif
namespace __sanitizer {
class FlagParser;
}
namespace __lsan { namespace __lsan {
// Chunk tags. // Chunk tags.
@ -36,44 +42,19 @@ enum ChunkTag {
}; };
struct Flags { struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG
void SetDefaults();
uptr pointer_alignment() const { uptr pointer_alignment() const {
return use_unaligned ? 1 : sizeof(uptr); return use_unaligned ? 1 : sizeof(uptr);
} }
// Print addresses of leaked objects after main leak report.
bool report_objects;
// Aggregate two objects into one leak if this many stack frames match. If
// zero, the entire stack trace must match.
int resolution;
// The number of leaks reported.
int max_leaks;
// If nonzero kill the process with this exit code upon finding leaks.
int exitcode;
// Flags controlling the root set of reachable memory.
// Global variables (.data and .bss).
bool use_globals;
// Thread stacks.
bool use_stacks;
// Thread registers.
bool use_registers;
// TLS and thread-specific storage.
bool use_tls;
// Regions added via __lsan_register_root_region().
bool use_root_regions;
// Consider unaligned pointers valid.
bool use_unaligned;
// Consider pointers found in poisoned memory to be valid.
bool use_poisoned;
// Debug logging.
bool log_pointers;
bool log_threads;
}; };
extern Flags lsan_flags; extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; } inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);
struct Leak { struct Leak {
u32 id; u32 id;
@ -117,6 +98,8 @@ typedef InternalMmapVector<uptr> Frontier;
void InitializePlatformSpecificModules(); void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier); void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier); void ProcessPlatformSpecificAllocations(Frontier *frontier);
// Run stoptheworld while holding any platform-specific locks.
void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
void ScanRangeForPointers(uptr begin, uptr end, void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier, Frontier *frontier,
@ -129,7 +112,7 @@ enum IgnoreObjectResult {
}; };
// Functions called from the parent tool. // Functions called from the parent tool.
void InitCommonLsan(bool standalone); void InitCommonLsan();
void DoLeakCheck(); void DoLeakCheck();
bool DisabledInThisThread(); bool DisabledInThisThread();

View File

@ -27,7 +27,7 @@ static const char kLinkerName[] = "ld";
// We request 2 modules matching "ld", so we can print a warning if there's more // We request 2 modules matching "ld", so we can print a warning if there's more
// than one match. But only the first one is actually used. // than one match. But only the first one is actually used.
static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64); static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = 0; static LoadedModule *linker = nullptr;
static bool IsLinker(const char* full_name) { static bool IsLinker(const char* full_name) {
return LibraryNameIs(full_name, kLinkerName); return LibraryNameIs(full_name, kLinkerName);
@ -47,7 +47,7 @@ void InitializePlatformSpecificModules() {
else if (num_matches > 1) else if (num_matches > 1)
VReport(1, "LeakSanitizer: Multiple modules match \"%s\". " VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
"TLS will not be handled correctly.\n", kLinkerName); "TLS will not be handled correctly.\n", kLinkerName);
linker = 0; linker = nullptr;
} }
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size, static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
@ -83,10 +83,6 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
// Scans global variables for heap pointers. // Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) { void ProcessGlobalRegions(Frontier *frontier) {
if (!flags()->use_globals) return; if (!flags()->use_globals) return;
// FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
// deadlocking by running this under StopTheWorld. However, the lock is
// reentrant, so we should be able to fix this by acquiring the lock before
// suspending threads.
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier); dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
} }
@ -112,7 +108,7 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
reinterpret_cast<ProcessPlatformAllocParam *>(arg); reinterpret_cast<ProcessPlatformAllocParam *>(arg);
chunk = GetUserBegin(chunk); chunk = GetUserBegin(chunk);
LsanMetadata m(chunk); LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) { if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
u32 stack_id = m.stack_trace_id(); u32 stack_id = m.stack_trace_id();
uptr caller_pc = 0; uptr caller_pc = 0;
if (stack_id > 0) if (stack_id > 0)
@ -151,5 +147,31 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg); ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
} }
} // namespace __lsan struct DoStopTheWorldParam {
#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX StopTheWorldCallback callback;
void *argument;
};
static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
void *data) {
DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
StopTheWorld(param->callback, param->argument);
return 1;
}
// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
// of the threads is frozen while holding the libdl lock, the tracer will hang
// in dl_iterate_phdr() forever.
// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
// tracer task and the thread that spawned it. Thus, if we run the tracer task
// while holding the libdl lock in the parent thread, we can safely reenter it
// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
// callback in the parent thread.
void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
DoStopTheWorldParam param = {callback, argument};
dl_iterate_phdr(DoStopTheWorldCallback, &param);
}
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX

View File

@ -0,0 +1,41 @@
//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// LSan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_FLAG
# error "Define LSAN_FLAG prior to including this file!"
#endif
// LSAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
LSAN_FLAG(bool, report_objects, false,
"Print addresses of leaked objects after main leak report.")
LSAN_FLAG(
int, resolution, 0,
"Aggregate two objects into one leak if this many stack frames match. If "
"zero, the entire stack trace must match.")
LSAN_FLAG(int, max_leaks, 0, "The number of leaks reported.")
// Flags controlling the root set of reachable memory.
LSAN_FLAG(bool, use_globals, true,
"Root set: include global variables (.data and .bss)")
LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
LSAN_FLAG(bool, use_tls, true,
"Root set: include TLS and thread-specific storage")
LSAN_FLAG(bool, use_root_regions, true,
"Root set: include regions added via __lsan_register_root_region().")
LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
LSAN_FLAG(bool, use_poisoned, false,
"Consider pointers found in poisoned memory to be valid.")
LSAN_FLAG(bool, log_pointers, false, "Debug logging")
LSAN_FLAG(bool, log_threads, false, "Debug logging")
LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")

View File

@ -10,11 +10,11 @@
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interception.h"
#include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h"
@ -69,7 +69,7 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
CHECK(allocated < kCallocPoolSize); CHECK(allocated < kCallocPoolSize);
return mem; return mem;
} }
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0; if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr;
ENSURE_LSAN_INITED; ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC; GET_STACK_TRACE_MALLOC;
size *= nmemb; size *= nmemb;
@ -162,9 +162,9 @@ void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
Deallocate(ptr); Deallocate(ptr);
INTERCEPTOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; } void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; } void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
@ -206,16 +206,16 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
// Wait until the last iteration to maximize the chance that we are the last // Wait until the last iteration to maximize the chance that we are the last
// destructor to run. // destructor to run.
if (pthread_setspecific(g_thread_finalize_key, if (pthread_setspecific(g_thread_finalize_key,
(void*)kPthreadDestructorIterations)) { (void*)GetPthreadDestructorIterations())) {
Report("LeakSanitizer: failed to set thread key.\n"); Report("LeakSanitizer: failed to set thread key.\n");
Die(); Die();
} }
int tid = 0; int tid = 0;
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield(); internal_sched_yield();
atomic_store(&p->tid, 0, memory_order_release);
SetCurrentThread(tid); SetCurrentThread(tid);
ThreadStart(tid, GetTid()); ThreadStart(tid, GetTid());
atomic_store(&p->tid, 0, memory_order_release);
return callback(param); return callback(param);
} }
@ -224,7 +224,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
ENSURE_LSAN_INITED; ENSURE_LSAN_INITED;
EnsureMainThreadIDIsCorrect(); EnsureMainThreadIDIsCorrect();
__sanitizer_pthread_attr_t myattr; __sanitizer_pthread_attr_t myattr;
if (attr == 0) { if (!attr) {
pthread_attr_init(&myattr); pthread_attr_init(&myattr);
attr = &myattr; attr = &myattr;
} }
@ -282,4 +282,4 @@ void InitializeInterceptors() {
} }
} }
} // namespace __lsan } // namespace __lsan

View File

@ -77,7 +77,7 @@ void ThreadContext::OnFinished() {
u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) { u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
return thread_registry->CreateThread(user_id, detached, parent_tid, return thread_registry->CreateThread(user_id, detached, parent_tid,
/* arg */ 0); /* arg */ nullptr);
} }
void ThreadStart(u32 tid, uptr os_id) { void ThreadStart(u32 tid, uptr os_id) {
@ -97,9 +97,9 @@ void ThreadFinish() {
} }
ThreadContext *CurrentThreadContext() { ThreadContext *CurrentThreadContext() {
if (!thread_registry) return 0; if (!thread_registry) return nullptr;
if (GetCurrentThread() == kInvalidTid) if (GetCurrentThread() == kInvalidTid)
return 0; return nullptr;
// No lock needed when getting current thread. // No lock needed when getting current thread.
return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread()); return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
} }
@ -118,7 +118,7 @@ u32 ThreadTid(uptr uid) {
void ThreadJoin(u32 tid) { void ThreadJoin(u32 tid) {
CHECK_NE(tid, kInvalidTid); CHECK_NE(tid, kInvalidTid);
thread_registry->JoinThread(tid, /* arg */0); thread_registry->JoinThread(tid, /* arg */nullptr);
} }
void EnsureMainThreadIDIsCorrect() { void EnsureMainThreadIDIsCorrect() {
@ -155,4 +155,4 @@ void UnlockThreadRegistry() {
thread_registry->Unlock(); thread_registry->Unlock();
} }
} // namespace __lsan } // namespace __lsan

View File

@ -20,8 +20,8 @@ namespace __lsan {
class ThreadContext : public ThreadContextBase { class ThreadContext : public ThreadContextBase {
public: public:
explicit ThreadContext(int tid); explicit ThreadContext(int tid);
void OnStarted(void *arg); void OnStarted(void *arg) override;
void OnFinished(); void OnFinished() override;
uptr stack_begin() { return stack_begin_; } uptr stack_begin() { return stack_begin_; }
uptr stack_end() { return stack_end_; } uptr stack_end() { return stack_end_; }
uptr tls_begin() { return tls_begin_; } uptr tls_begin() { return tls_begin_; }

View File

@ -27,6 +27,7 @@ sanitizer_common_files = \
sanitizer_deadlock_detector1.cc \ sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \ sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \ sanitizer_flags.cc \
sanitizer_flag_parser.cc \
sanitizer_libc.cc \ sanitizer_libc.cc \
sanitizer_libignore.cc \ sanitizer_libignore.cc \
sanitizer_linux.cc \ sanitizer_linux.cc \
@ -45,6 +46,7 @@ sanitizer_common_files = \
sanitizer_stackdepot.cc \ sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \ sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \ sanitizer_stacktrace_libcdep.cc \
sanitizer_symbolizer_mac.cc \
sanitizer_stacktrace_printer.cc \ sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \ sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \ sanitizer_suppressions.cc \
@ -55,7 +57,7 @@ sanitizer_common_files = \
sanitizer_symbolizer_win.cc \ sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \ sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \ sanitizer_tls_get_addr.cc \
sanitizer_unwind_posix_libcdep.cc \ sanitizer_unwind_linux_libcdep.cc \
sanitizer_win.cc sanitizer_win.cc

View File

@ -85,7 +85,8 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_coverage_mapping_libcdep.lo \ sanitizer_coverage_mapping_libcdep.lo \
sanitizer_deadlock_detector1.lo \ sanitizer_deadlock_detector1.lo \
sanitizer_deadlock_detector2.lo sanitizer_flags.lo \ sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \ sanitizer_flag_parser.lo sanitizer_libc.lo \
sanitizer_libignore.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_mac.lo \ sanitizer_linux_libcdep.lo sanitizer_mac.lo \
sanitizer_persistent_allocator.lo \ sanitizer_persistent_allocator.lo \
sanitizer_platform_limits_linux.lo \ sanitizer_platform_limits_linux.lo \
@ -94,7 +95,7 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_procmaps_common.lo sanitizer_procmaps_freebsd.lo \ sanitizer_procmaps_common.lo sanitizer_procmaps_freebsd.lo \
sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \ sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \ sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
sanitizer_stacktrace_libcdep.lo \ sanitizer_stacktrace_libcdep.lo sanitizer_symbolizer_mac.lo \
sanitizer_stacktrace_printer.lo \ sanitizer_stacktrace_printer.lo \
sanitizer_stoptheworld_linux_libcdep.lo \ sanitizer_stoptheworld_linux_libcdep.lo \
sanitizer_suppressions.lo sanitizer_symbolizer.lo \ sanitizer_suppressions.lo sanitizer_symbolizer.lo \
@ -102,7 +103,7 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_symbolizer_libcdep.lo \ sanitizer_symbolizer_libcdep.lo \
sanitizer_symbolizer_posix_libcdep.lo \ sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \ sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
sanitizer_tls_get_addr.lo sanitizer_unwind_posix_libcdep.lo \ sanitizer_tls_get_addr.lo sanitizer_unwind_linux_libcdep.lo \
sanitizer_win.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1) am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS) libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
@ -290,6 +291,7 @@ sanitizer_common_files = \
sanitizer_deadlock_detector1.cc \ sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \ sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \ sanitizer_flags.cc \
sanitizer_flag_parser.cc \
sanitizer_libc.cc \ sanitizer_libc.cc \
sanitizer_libignore.cc \ sanitizer_libignore.cc \
sanitizer_linux.cc \ sanitizer_linux.cc \
@ -308,6 +310,7 @@ sanitizer_common_files = \
sanitizer_stackdepot.cc \ sanitizer_stackdepot.cc \
sanitizer_stacktrace.cc \ sanitizer_stacktrace.cc \
sanitizer_stacktrace_libcdep.cc \ sanitizer_stacktrace_libcdep.cc \
sanitizer_symbolizer_mac.cc \
sanitizer_stacktrace_printer.cc \ sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \ sanitizer_stoptheworld_linux_libcdep.cc \
sanitizer_suppressions.cc \ sanitizer_suppressions.cc \
@ -318,7 +321,7 @@ sanitizer_common_files = \
sanitizer_symbolizer_win.cc \ sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \ sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \ sanitizer_tls_get_addr.cc \
sanitizer_unwind_posix_libcdep.cc \ sanitizer_unwind_linux_libcdep.cc \
sanitizer_win.cc sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files) libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
@ -421,6 +424,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_mapping_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_mapping_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flag_parser.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@ -446,11 +450,12 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libbacktrace.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libbacktrace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_posix_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
.cc.o: .cc.o:

View File

@ -141,7 +141,7 @@ bool AddrHashMap<T, kSize>::Handle::created() const {
template<typename T, uptr kSize> template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const { bool AddrHashMap<T, kSize>::Handle::exists() const {
return cell_ != 0; return cell_ != nullptr;
} }
template<typename T, uptr kSize> template<typename T, uptr kSize>
@ -158,7 +158,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
h->created_ = false; h->created_ = false;
h->addidx_ = -1U; h->addidx_ = -1U;
h->bucket_ = b; h->bucket_ = b;
h->cell_ = 0; h->cell_ = nullptr;
// If we want to remove the element, we need exclusive access to the bucket, // If we want to remove the element, we need exclusive access to the bucket,
// so skip the lock-free phase. // so skip the lock-free phase.
@ -248,7 +248,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
} }
// Store in the add cells. // Store in the add cells.
if (add == 0) { if (!add) {
// Allocate a new add array. // Allocate a new add array.
const uptr kInitSize = 64; const uptr kInitSize = 64;
add = (AddBucket*)InternalAlloc(kInitSize); add = (AddBucket*)InternalAlloc(kInitSize);
@ -280,7 +280,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
template<typename T, uptr kSize> template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) { void AddrHashMap<T, kSize>::release(Handle *h) {
if (h->cell_ == 0) if (!h->cell_)
return; return;
Bucket *b = h->bucket_; Bucket *b = h->bucket_;
Cell *c = h->cell_; Cell *c = h->cell_;

View File

@ -9,10 +9,10 @@
// run-time libraries. // run-time libraries.
// This allocator is used inside run-times. // This allocator is used inside run-times.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h" #include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h" #include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_flags.h"
namespace __sanitizer { namespace __sanitizer {
@ -43,7 +43,7 @@ InternalAllocator *internal_allocator() {
return 0; return 0;
} }
#else // SANITIZER_GO #else // SANITIZER_GO
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)]; static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized; static atomic_uint8_t internal_allocator_initialized;
@ -59,7 +59,7 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu); SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) == if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) { 0) {
internal_allocator_instance->Init(); internal_allocator_instance->Init(/* may_return_null*/ false);
atomic_store(&internal_allocator_initialized, 1, memory_order_release); atomic_store(&internal_allocator_initialized, 1, memory_order_release);
} }
} }
@ -76,29 +76,29 @@ static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
} }
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) { static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
if (cache == 0) { if (!cache) {
SpinMutexLock l(&internal_allocator_cache_mu); SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Deallocate(&internal_allocator_cache, ptr); return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
} }
internal_allocator()->Deallocate(cache, ptr); internal_allocator()->Deallocate(cache, ptr);
} }
#endif // SANITIZER_GO #endif // SANITIZER_GO
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull; const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache) { void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
if (size + sizeof(u64) < size) if (size + sizeof(u64) < size)
return 0; return nullptr;
void *p = RawInternalAlloc(size + sizeof(u64), cache); void *p = RawInternalAlloc(size + sizeof(u64), cache);
if (p == 0) if (!p)
return 0; return nullptr;
((u64*)p)[0] = kBlockMagic; ((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64); return (char*)p + sizeof(u64);
} }
void InternalFree(void *addr, InternalAllocatorCache *cache) { void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (addr == 0) if (!addr)
return; return;
addr = (char*)addr - sizeof(u64); addr = (char*)addr - sizeof(u64);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]); CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
@ -138,14 +138,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n; return (max / size) < n;
} }
void *AllocatorReturnNull() { void NORETURN ReportAllocatorCannotReturnNull() {
if (common_flags()->allocator_may_return_null)
return 0;
Report("%s's allocator is terminating the process instead of returning 0\n", Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName); SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n"); Report("If you don't like this behavior set allocator_may_return_null=1\n");
CHECK(0); CHECK(0);
return 0; Die();
} }
} // namespace __sanitizer } // namespace __sanitizer

View File

@ -21,8 +21,8 @@
namespace __sanitizer { namespace __sanitizer {
// Depending on allocator_may_return_null either return 0 or crash. // Prints error message and kills the program.
void *AllocatorReturnNull(); void NORETURN ReportAllocatorCannotReturnNull();
// SizeClassMap maps allocation sizes into size classes and back. // SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0. // Class 0 corresponds to size 0.
@ -209,6 +209,7 @@ class AllocatorStats {
void Init() { void Init() {
internal_memset(this, 0, sizeof(*this)); internal_memset(this, 0, sizeof(*this));
} }
void InitLinkerInitialized() {}
void Add(AllocatorStat i, uptr v) { void Add(AllocatorStat i, uptr v) {
v += atomic_load(&stats_[i], memory_order_relaxed); v += atomic_load(&stats_[i], memory_order_relaxed);
@ -238,11 +239,14 @@ class AllocatorStats {
// Global stats, used for aggregation and querying. // Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats { class AllocatorGlobalStats : public AllocatorStats {
public: public:
void Init() { void InitLinkerInitialized() {
internal_memset(this, 0, sizeof(*this));
next_ = this; next_ = this;
prev_ = this; prev_ = this;
} }
void Init() {
internal_memset(this, 0, sizeof(*this));
InitLinkerInitialized();
}
void Register(AllocatorStats *s) { void Register(AllocatorStats *s) {
SpinMutexLock l(&mu_); SpinMutexLock l(&mu_);
@ -317,7 +321,7 @@ class SizeClassAllocator64 {
void Init() { void Init() {
CHECK_EQ(kSpaceBeg, CHECK_EQ(kSpaceBeg,
reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize))); reinterpret_cast<uptr>(MmapNoAccess(kSpaceBeg, kSpaceSize)));
MapWithCallback(kSpaceEnd, AdditionalSize()); MapWithCallback(kSpaceEnd, AdditionalSize());
} }
@ -341,7 +345,7 @@ class SizeClassAllocator64 {
CHECK_LT(class_id, kNumClasses); CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id); RegionInfo *region = GetRegionInfo(class_id);
Batch *b = region->free_list.Pop(); Batch *b = region->free_list.Pop();
if (b == 0) if (!b)
b = PopulateFreeList(stat, c, class_id, region); b = PopulateFreeList(stat, c, class_id, region);
region->n_allocated += b->count; region->n_allocated += b->count;
return b; return b;
@ -365,16 +369,16 @@ class SizeClassAllocator64 {
void *GetBlockBegin(const void *p) { void *GetBlockBegin(const void *p) {
uptr class_id = GetSizeClass(p); uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id); uptr size = SizeClassMap::Size(class_id);
if (!size) return 0; if (!size) return nullptr;
uptr chunk_idx = GetChunkIdx((uptr)p, size); uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = (uptr)p & ~(kRegionSize - 1); uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
uptr beg = chunk_idx * size; uptr beg = chunk_idx * size;
uptr next_beg = beg + size; uptr next_beg = beg + size;
if (class_id >= kNumClasses) return 0; if (class_id >= kNumClasses) return nullptr;
RegionInfo *region = GetRegionInfo(class_id); RegionInfo *region = GetRegionInfo(class_id);
if (region->mapped_user >= next_beg) if (region->mapped_user >= next_beg)
return reinterpret_cast<void*>(reg_beg + beg); return reinterpret_cast<void*>(reg_beg + beg);
return 0; return nullptr;
} }
static uptr GetActuallyAllocatedSize(void *p) { static uptr GetActuallyAllocatedSize(void *p) {
@ -603,6 +607,7 @@ class TwoLevelByteMap {
internal_memset(map1_, 0, sizeof(map1_)); internal_memset(map1_, 0, sizeof(map1_));
mu_.Init(); mu_.Init();
} }
void TestOnlyUnmap() { void TestOnlyUnmap() {
for (uptr i = 0; i < kSize1; i++) { for (uptr i = 0; i < kSize1; i++) {
u8 *p = Get(i); u8 *p = Get(i);
@ -816,6 +821,10 @@ class SizeClassAllocator32 {
void PrintStats() { void PrintStats() {
} }
static uptr AdditionalSize() {
return 0;
}
typedef SizeClassMap SizeClassMapT; typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses; static const uptr kNumClasses = SizeClassMap::kNumClasses;
@ -862,9 +871,9 @@ class SizeClassAllocator32 {
uptr reg = AllocateRegion(stat, class_id); uptr reg = AllocateRegion(stat, class_id);
uptr n_chunks = kRegionSize / (size + kMetadataSize); uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = SizeClassMap::MaxCached(class_id); uptr max_count = SizeClassMap::MaxCached(class_id);
Batch *b = 0; Batch *b = nullptr;
for (uptr i = reg; i < reg + n_chunks * size; i += size) { for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (b == 0) { if (!b) {
if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch))); b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
else else
@ -875,7 +884,7 @@ class SizeClassAllocator32 {
if (b->count == max_count) { if (b->count == max_count) {
CHECK_GT(b->count, 0); CHECK_GT(b->count, 0);
sci->free_list.push_back(b); sci->free_list.push_back(b);
b = 0; b = nullptr;
} }
} }
if (b) { if (b) {
@ -1000,9 +1009,14 @@ struct SizeClassAllocatorLocalCache {
template <class MapUnmapCallback = NoOpMapUnmapCallback> template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator { class LargeMmapAllocator {
public: public:
void Init() { void InitLinkerInitialized(bool may_return_null) {
internal_memset(this, 0, sizeof(*this));
page_size_ = GetPageSizeCached(); page_size_ = GetPageSizeCached();
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
void Init(bool may_return_null) {
internal_memset(this, 0, sizeof(*this));
InitLinkerInitialized(may_return_null);
} }
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) { void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@ -1010,7 +1024,9 @@ class LargeMmapAllocator {
uptr map_size = RoundUpMapSize(size); uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_) if (alignment > page_size_)
map_size += alignment; map_size += alignment;
if (map_size < size) return AllocatorReturnNull(); // Overflow. // Overflow.
if (map_size < size)
return ReturnNullOrDie();
uptr map_beg = reinterpret_cast<uptr>( uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator")); MmapOrDie(map_size, "LargeMmapAllocator"));
CHECK(IsAligned(map_beg, page_size_)); CHECK(IsAligned(map_beg, page_size_));
@ -1046,6 +1062,16 @@ class LargeMmapAllocator {
return reinterpret_cast<void*>(res); return reinterpret_cast<void*>(res);
} }
void *ReturnNullOrDie() {
if (atomic_load(&may_return_null_, memory_order_acquire))
return nullptr;
ReportAllocatorCannotReturnNull();
}
void SetMayReturnNull(bool may_return_null) {
atomic_store(&may_return_null_, may_return_null, memory_order_release);
}
void Deallocate(AllocatorStats *stat, void *p) { void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p); Header *h = GetHeader(p);
{ {
@ -1078,7 +1104,7 @@ class LargeMmapAllocator {
} }
bool PointerIsMine(const void *p) { bool PointerIsMine(const void *p) {
return GetBlockBegin(p) != 0; return GetBlockBegin(p) != nullptr;
} }
uptr GetActuallyAllocatedSize(void *p) { uptr GetActuallyAllocatedSize(void *p) {
@ -1107,13 +1133,13 @@ class LargeMmapAllocator {
nearest_chunk = ch; nearest_chunk = ch;
} }
if (!nearest_chunk) if (!nearest_chunk)
return 0; return nullptr;
Header *h = reinterpret_cast<Header *>(nearest_chunk); Header *h = reinterpret_cast<Header *>(nearest_chunk);
CHECK_GE(nearest_chunk, h->map_beg); CHECK_GE(nearest_chunk, h->map_beg);
CHECK_LT(nearest_chunk, h->map_beg + h->map_size); CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
CHECK_LE(nearest_chunk, p); CHECK_LE(nearest_chunk, p);
if (h->map_beg + h->map_size <= p) if (h->map_beg + h->map_size <= p)
return 0; return nullptr;
return GetUser(h); return GetUser(h);
} }
@ -1123,7 +1149,7 @@ class LargeMmapAllocator {
mutex_.CheckLocked(); mutex_.CheckLocked();
uptr p = reinterpret_cast<uptr>(ptr); uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_; uptr n = n_chunks_;
if (!n) return 0; if (!n) return nullptr;
if (!chunks_sorted_) { if (!chunks_sorted_) {
// Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate. // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
SortArray(reinterpret_cast<uptr*>(chunks_), n); SortArray(reinterpret_cast<uptr*>(chunks_), n);
@ -1135,7 +1161,7 @@ class LargeMmapAllocator {
chunks_[n - 1]->map_size; chunks_[n - 1]->map_size;
} }
if (p < min_mmap_ || p >= max_mmap_) if (p < min_mmap_ || p >= max_mmap_)
return 0; return nullptr;
uptr beg = 0, end = n - 1; uptr beg = 0, end = n - 1;
// This loop is a log(n) lower_bound. It does not check for the exact match // This loop is a log(n) lower_bound. It does not check for the exact match
// to avoid expensive cache-thrashing loads. // to avoid expensive cache-thrashing loads.
@ -1156,7 +1182,7 @@ class LargeMmapAllocator {
Header *h = chunks_[beg]; Header *h = chunks_[beg];
if (h->map_beg + h->map_size <= p || p < h->map_beg) if (h->map_beg + h->map_size <= p || p < h->map_beg)
return 0; return nullptr;
return GetUser(h); return GetUser(h);
} }
@ -1224,6 +1250,7 @@ class LargeMmapAllocator {
struct Stats { struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64]; uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats; } stats;
atomic_uint8_t may_return_null_;
SpinMutex mutex_; SpinMutex mutex_;
}; };
@ -1237,19 +1264,32 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT class SecondaryAllocator> // NOLINT
class CombinedAllocator { class CombinedAllocator {
public: public:
void Init() { void InitCommon(bool may_return_null) {
primary_.Init(); primary_.Init();
secondary_.Init(); atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
void InitLinkerInitialized(bool may_return_null) {
secondary_.InitLinkerInitialized(may_return_null);
stats_.InitLinkerInitialized();
InitCommon(may_return_null);
}
void Init(bool may_return_null) {
secondary_.Init(may_return_null);
stats_.Init(); stats_.Init();
InitCommon(may_return_null);
} }
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment, void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
bool cleared = false) { bool cleared = false, bool check_rss_limit = false) {
// Returning 0 on malloc(0) may break a lot of code. // Returning 0 on malloc(0) may break a lot of code.
if (size == 0) if (size == 0)
size = 1; size = 1;
if (size + alignment < size) if (size + alignment < size)
return AllocatorReturnNull(); return ReturnNullOrDie();
if (check_rss_limit && RssLimitIsExceeded())
return ReturnNullOrDie();
if (alignment > 8) if (alignment > 8)
size = RoundUpTo(size, alignment); size = RoundUpTo(size, alignment);
void *res; void *res;
@ -1265,6 +1305,30 @@ class CombinedAllocator {
return res; return res;
} }
bool MayReturnNull() const {
return atomic_load(&may_return_null_, memory_order_acquire);
}
void *ReturnNullOrDie() {
if (MayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull();
}
void SetMayReturnNull(bool may_return_null) {
secondary_.SetMayReturnNull(may_return_null);
atomic_store(&may_return_null_, may_return_null, memory_order_release);
}
bool RssLimitIsExceeded() {
return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
}
void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
memory_order_release);
}
void Deallocate(AllocatorCache *cache, void *p) { void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return; if (!p) return;
if (primary_.PointerIsMine(p)) if (primary_.PointerIsMine(p))
@ -1279,7 +1343,7 @@ class CombinedAllocator {
return Allocate(cache, new_size, alignment); return Allocate(cache, new_size, alignment);
if (!new_size) { if (!new_size) {
Deallocate(cache, p); Deallocate(cache, p);
return 0; return nullptr;
} }
CHECK(PointerIsMine(p)); CHECK(PointerIsMine(p));
uptr old_size = GetActuallyAllocatedSize(p); uptr old_size = GetActuallyAllocatedSize(p);
@ -1377,11 +1441,13 @@ class CombinedAllocator {
PrimaryAllocator primary_; PrimaryAllocator primary_;
SecondaryAllocator secondary_; SecondaryAllocator secondary_;
AllocatorGlobalStats stats_; AllocatorGlobalStats stats_;
atomic_uint8_t may_return_null_;
atomic_uint8_t rss_limit_is_exceeded_;
}; };
// Returns true if calloc(size, n) should return 0 due to overflow in size*n. // Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n); bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
} // namespace __sanitizer } // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H #endif // SANITIZER_ALLOCATOR_H

View File

@ -1,4 +1,4 @@
//===-- sanitizer_allocator_internal.h -------------------------- C++ -----===// //===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===//
// //
// This file is distributed under the University of Illinois Open Source // This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details. // License. See LICENSE.TXT for details.
@ -43,10 +43,19 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache, typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
LargeMmapAllocator<> > InternalAllocator; LargeMmapAllocator<> > InternalAllocator;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = 0); void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr);
void InternalFree(void *p, InternalAllocatorCache *cache = 0); void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
InternalAllocator *internal_allocator(); InternalAllocator *internal_allocator();
} // namespace __sanitizer enum InternalAllocEnum {
INTERNAL_ALLOC
};
#endif // SANITIZER_ALLOCATOR_INTERNAL_H } // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
InternalAllocEnum) {
return InternalAlloc(size);
}
#endif // SANITIZER_ALLOCATOR_INTERNAL_H

View File

@ -53,7 +53,7 @@ struct atomic_uintptr_t {
} // namespace __sanitizer } // namespace __sanitizer
#if defined(__GNUC__) #if defined(__clang__) || defined(__GNUC__)
# include "sanitizer_atomic_clang.h" # include "sanitizer_atomic_clang.h"
#elif defined(_MSC_VER) #elif defined(_MSC_VER)
# include "sanitizer_atomic_msvc.h" # include "sanitizer_atomic_msvc.h"
@ -61,4 +61,20 @@ struct atomic_uintptr_t {
# error "Unsupported compiler" # error "Unsupported compiler"
#endif #endif
namespace __sanitizer {
// Clutter-reducing helpers.
template<typename T>
INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
return atomic_load(a, memory_order_relaxed);
}
template<typename T>
INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
atomic_store(a, v, memory_order_relaxed);
}
} // namespace __sanitizer
#endif // SANITIZER_ATOMIC_H #endif // SANITIZER_ATOMIC_H

View File

@ -19,6 +19,15 @@ extern "C" void _mm_mfence();
#pragma intrinsic(_mm_mfence) #pragma intrinsic(_mm_mfence)
extern "C" void _mm_pause(); extern "C" void _mm_pause();
#pragma intrinsic(_mm_pause) #pragma intrinsic(_mm_pause)
extern "C" char _InterlockedExchange8( // NOLINT
char volatile *Addend, char Value); // NOLINT
#pragma intrinsic(_InterlockedExchange8)
extern "C" short _InterlockedExchange16( // NOLINT
short volatile *Addend, short Value); // NOLINT
#pragma intrinsic(_InterlockedExchange16)
extern "C" long _InterlockedExchange( // NOLINT
long volatile *Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchange)
extern "C" long _InterlockedExchangeAdd( // NOLINT extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd) #pragma intrinsic(_InterlockedExchangeAdd)
@ -143,28 +152,25 @@ INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
u8 v, memory_order mo) { u8 v, memory_order mo) {
(void)mo; (void)mo;
DCHECK(!((uptr)a % sizeof(*a))); DCHECK(!((uptr)a % sizeof(*a)));
__asm { return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
mov eax, a
mov cl, v
xchg [eax], cl // NOLINT
mov v, cl
}
return v;
} }
INLINE u16 atomic_exchange(volatile atomic_uint16_t *a, INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
u16 v, memory_order mo) { u16 v, memory_order mo) {
(void)mo; (void)mo;
DCHECK(!((uptr)a % sizeof(*a))); DCHECK(!((uptr)a % sizeof(*a)));
__asm { return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
mov eax, a
mov cx, v
xchg [eax], cx // NOLINT
mov v, cx
}
return v;
} }
INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}
#ifndef _WIN64
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a, INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
u8 *cmp, u8 *cmp,
u8 xchgv, u8 xchgv,
@ -186,6 +192,8 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
return false; return false;
} }
#endif
INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr *cmp, uptr *cmp,
uptr xchg, uptr xchg,

View File

@ -10,13 +10,19 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_flags.h" #include "sanitizer_flags.h"
#include "sanitizer_libc.h" #include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace_printer.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer { namespace __sanitizer {
const char *SanitizerToolName = "SanitizerTool"; const char *SanitizerToolName = "SanitizerTool";
atomic_uint32_t current_verbosity;
uptr GetPageSizeCached() { uptr GetPageSizeCached() {
static uptr PageSize; static uptr PageSize;
if (!PageSize) if (!PageSize)
@ -24,19 +30,71 @@ uptr GetPageSizeCached() {
return PageSize; return PageSize;
} }
StaticSpinMutex report_file_mu;
ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid| void RawWrite(const char *buffer) {
// isn't equal to the current PID, try to obtain file descriptor by opening report_file.Write(buffer, internal_strlen(buffer));
// file "report_path_prefix.<PID>". }
fd_t report_fd = kStderrFd;
// Set via __sanitizer_set_report_path. void ReportFile::ReopenIfNecessary() {
bool log_to_file = false; mu->CheckLocked();
char report_path_prefix[sizeof(report_path_prefix)]; if (fd == kStdoutFd || fd == kStderrFd) return;
// PID of process that opened |report_fd|. If a fork() occurs, the PID of the uptr pid = internal_getpid();
// child thread will be different from |report_fd_pid|. // If in tracer, use the parent's file.
uptr report_fd_pid = 0; if (pid == stoptheworld_tracer_pid)
pid = stoptheworld_tracer_ppid;
if (fd != kInvalidFd) {
// If the report file is already opened by the current process,
// do nothing. Otherwise the report file was opened by the parent
// process, close it now.
if (fd_pid == pid)
return;
else
CloseFile(fd);
}
const char *exe_name = GetProcessName();
if (common_flags()->log_exe_name && exe_name) {
internal_snprintf(full_path, kMaxPathLength, "%s.%s.%zu", path_prefix,
exe_name, pid);
} else {
internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
}
fd = OpenFile(full_path, WrOnly);
if (fd == kInvalidFd) {
const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
Die();
}
fd_pid = pid;
}
void ReportFile::SetReportPath(const char *path) {
if (!path)
return;
uptr len = internal_strlen(path);
if (len > sizeof(path_prefix) - 100) {
Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
path[0], path[1], path[2], path[3],
path[4], path[5], path[6], path[7]);
Die();
}
SpinMutexLock l(mu);
if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
CloseFile(fd);
fd = kInvalidFd;
if (internal_strcmp(path, "stdout") == 0) {
fd = kStdoutFd;
} else if (internal_strcmp(path, "stderr") == 0) {
fd = kStderrFd;
} else {
internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
}
}
// PID of the tracer task in StopTheWorld. It shares the address space with the // PID of the tracer task in StopTheWorld. It shares the address space with the
// main process, but has a different PID and thus requires special handling. // main process, but has a different PID and thus requires special handling.
@ -45,20 +103,47 @@ uptr stoptheworld_tracer_pid = 0;
// writing to the same log file. // writing to the same log file.
uptr stoptheworld_tracer_ppid = 0; uptr stoptheworld_tracer_ppid = 0;
static DieCallbackType DieCallback; static const int kMaxNumOfInternalDieCallbacks = 5;
void SetDieCallback(DieCallbackType callback) { static DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks];
DieCallback = callback;
bool AddDieCallback(DieCallbackType callback) {
for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
if (InternalDieCallbacks[i] == nullptr) {
InternalDieCallbacks[i] = callback;
return true;
}
}
return false;
} }
DieCallbackType GetDieCallback() { bool RemoveDieCallback(DieCallbackType callback) {
return DieCallback; for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
if (InternalDieCallbacks[i] == callback) {
internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1],
sizeof(InternalDieCallbacks[0]) *
(kMaxNumOfInternalDieCallbacks - i - 1));
InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr;
return true;
}
}
return false;
}
static DieCallbackType UserDieCallback;
void SetUserDieCallback(DieCallbackType callback) {
UserDieCallback = callback;
} }
void NORETURN Die() { void NORETURN Die() {
if (DieCallback) { if (UserDieCallback)
DieCallback(); UserDieCallback();
for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) {
if (InternalDieCallbacks[i])
InternalDieCallbacks[i]();
} }
internal__exit(1); if (common_flags()->abort_on_error)
Abort();
internal__exit(common_flags()->exitcode);
} }
static CheckFailedCallbackType CheckFailedCallback; static CheckFailedCallbackType CheckFailedCallback;
@ -76,37 +161,57 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
Die(); Die();
} }
uptr ReadFileToBuffer(const char *file_name, char **buff, void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
uptr *buff_size, uptr max_len) { error_t err) {
static int recursion_count;
if (recursion_count) {
// The Report() and CHECK calls below may call mmap recursively and fail.
// If we went into recursion, just die.
RawWrite("ERROR: Failed to mmap\n");
Die();
}
recursion_count++;
Report("ERROR: %s failed to "
"allocate 0x%zx (%zd) bytes of %s (error code: %d)\n",
SanitizerToolName, size, size, mem_type, err);
DumpProcessMap();
UNREACHABLE("unable to mmap");
}
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len, error_t *errno_p) {
uptr PageSize = GetPageSizeCached(); uptr PageSize = GetPageSizeCached();
uptr kMinFileLen = PageSize; uptr kMinFileLen = PageSize;
uptr read_len = 0; *buff = nullptr;
*buff = 0;
*buff_size = 0; *buff_size = 0;
*read_len = 0;
// The files we usually open are not seekable, so try different buffer sizes. // The files we usually open are not seekable, so try different buffer sizes.
for (uptr size = kMinFileLen; size <= max_len; size *= 2) { for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
uptr openrv = OpenFile(file_name, /*write*/ false); fd_t fd = OpenFile(file_name, RdOnly, errno_p);
if (internal_iserror(openrv)) return 0; if (fd == kInvalidFd) return false;
fd_t fd = openrv;
UnmapOrDie(*buff, *buff_size); UnmapOrDie(*buff, *buff_size);
*buff = (char*)MmapOrDie(size, __func__); *buff = (char*)MmapOrDie(size, __func__);
*buff_size = size; *buff_size = size;
*read_len = 0;
// Read up to one page at a time. // Read up to one page at a time.
read_len = 0;
bool reached_eof = false; bool reached_eof = false;
while (read_len + PageSize <= size) { while (*read_len + PageSize <= size) {
uptr just_read = internal_read(fd, *buff + read_len, PageSize); uptr just_read;
if (!ReadFromFile(fd, *buff + *read_len, PageSize, &just_read, errno_p)) {
UnmapOrDie(*buff, *buff_size);
return false;
}
if (just_read == 0) { if (just_read == 0) {
reached_eof = true; reached_eof = true;
break; break;
} }
read_len += just_read; *read_len += just_read;
} }
internal_close(fd); CloseFile(fd);
if (reached_eof) // We've read the whole file. if (reached_eof) // We've read the whole file.
break; break;
} }
return read_len; return true;
} }
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b); typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
@ -143,62 +248,77 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
const char *StripPathPrefix(const char *filepath, const char *StripPathPrefix(const char *filepath,
const char *strip_path_prefix) { const char *strip_path_prefix) {
if (filepath == 0) return 0; if (!filepath) return nullptr;
if (strip_path_prefix == 0) return filepath; if (!strip_path_prefix) return filepath;
const char *pos = internal_strstr(filepath, strip_path_prefix); const char *res = filepath;
if (pos == 0) return filepath; if (const char *pos = internal_strstr(filepath, strip_path_prefix))
pos += internal_strlen(strip_path_prefix); res = pos + internal_strlen(strip_path_prefix);
if (pos[0] == '.' && pos[1] == '/') if (res[0] == '.' && res[1] == '/')
pos += 2; res += 2;
return pos; return res;
} }
const char *StripModuleName(const char *module) { const char *StripModuleName(const char *module) {
if (module == 0) if (!module)
return 0; return nullptr;
if (const char *slash_pos = internal_strrchr(module, '/')) if (SANITIZER_WINDOWS) {
// On Windows, both slash and backslash are possible.
// Pick the one that goes last.
if (const char *bslash_pos = internal_strrchr(module, '\\'))
return StripModuleName(bslash_pos + 1);
}
if (const char *slash_pos = internal_strrchr(module, '/')) {
return slash_pos + 1; return slash_pos + 1;
}
return module; return module;
} }
void ReportErrorSummary(const char *error_message) { void ReportErrorSummary(const char *error_message) {
if (!common_flags()->print_summary) if (!common_flags()->print_summary)
return; return;
InternalScopedBuffer<char> buff(kMaxSummaryLength); InternalScopedString buff(kMaxSummaryLength);
internal_snprintf(buff.data(), buff.size(), buff.append("SUMMARY: %s: %s", SanitizerToolName, error_message);
"SUMMARY: %s: %s", SanitizerToolName, error_message);
__sanitizer_report_error_summary(buff.data()); __sanitizer_report_error_summary(buff.data());
} }
void ReportErrorSummary(const char *error_type, const char *file, #ifndef SANITIZER_GO
int line, const char *function) { void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
if (!common_flags()->print_summary) if (!common_flags()->print_summary)
return; return;
InternalScopedBuffer<char> buff(kMaxSummaryLength); InternalScopedString buff(kMaxSummaryLength);
internal_snprintf( buff.append("%s ", error_type);
buff.data(), buff.size(), "%s %s:%d %s", error_type, RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
file ? StripPathPrefix(file, common_flags()->strip_path_prefix) : "??", common_flags()->strip_path_prefix);
line, function ? function : "??");
ReportErrorSummary(buff.data()); ReportErrorSummary(buff.data());
} }
#endif
LoadedModule::LoadedModule(const char *module_name, uptr base_address) { void LoadedModule::set(const char *module_name, uptr base_address) {
clear();
full_name_ = internal_strdup(module_name); full_name_ = internal_strdup(module_name);
base_address_ = base_address; base_address_ = base_address;
n_ranges_ = 0; }
void LoadedModule::clear() {
InternalFree(full_name_);
full_name_ = nullptr;
while (!ranges_.empty()) {
AddressRange *r = ranges_.front();
ranges_.pop_front();
InternalFree(r);
}
} }
void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) { void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges); void *mem = InternalAlloc(sizeof(AddressRange));
ranges_[n_ranges_].beg = beg; AddressRange *r = new(mem) AddressRange(beg, end, executable);
ranges_[n_ranges_].end = end; ranges_.push_back(r);
exec_[n_ranges_] = executable;
n_ranges_++;
} }
bool LoadedModule::containsAddress(uptr address) const { bool LoadedModule::containsAddress(uptr address) const {
for (uptr i = 0; i < n_ranges_; i++) { for (Iterator iter = ranges(); iter.hasNext();) {
if (ranges_[i].beg <= address && address < ranges_[i].end) const AddressRange *r = iter.next();
if (r->beg <= address && address < r->end)
return true; return true;
} }
return false; return false;
@ -210,12 +330,9 @@ void IncreaseTotalMmap(uptr size) {
if (!common_flags()->mmap_limit_mb) return; if (!common_flags()->mmap_limit_mb) return;
uptr total_mmaped = uptr total_mmaped =
atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size; atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
if ((total_mmaped >> 20) > common_flags()->mmap_limit_mb) { // Since for now mmap_limit_mb is not a user-facing flag, just kill
// Since for now mmap_limit_mb is not a user-facing flag, just CHECK. // a program. Use RAW_CHECK to avoid extra mmaps in reporting.
uptr mmap_limit_mb = common_flags()->mmap_limit_mb; RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb);
common_flags()->mmap_limit_mb = 0; // Allow mmap in CHECK.
RAW_CHECK(total_mmaped >> 20 < mmap_limit_mb);
}
} }
void DecreaseTotalMmap(uptr size) { void DecreaseTotalMmap(uptr size) {
@ -223,39 +340,130 @@ void DecreaseTotalMmap(uptr size) {
atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed); atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
} }
} // namespace __sanitizer bool TemplateMatch(const char *templ, const char *str) {
if ((!str) || str[0] == 0)
return false;
bool start = false;
if (templ && templ[0] == '^') {
start = true;
templ++;
}
bool asterisk = false;
while (templ && templ[0]) {
if (templ[0] == '*') {
templ++;
start = false;
asterisk = true;
continue;
}
if (templ[0] == '$')
return str[0] == 0 || asterisk;
if (str[0] == 0)
return false;
char *tpos = (char*)internal_strchr(templ, '*');
char *tpos1 = (char*)internal_strchr(templ, '$');
if ((!tpos) || (tpos1 && tpos1 < tpos))
tpos = tpos1;
if (tpos)
tpos[0] = 0;
const char *str0 = str;
const char *spos = internal_strstr(str, templ);
str = spos + internal_strlen(templ);
templ = tpos;
if (tpos)
tpos[0] = tpos == tpos1 ? '$' : '*';
if (!spos)
return false;
if (start && spos != str0)
return false;
start = false;
asterisk = false;
}
return true;
}
static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';
char *FindPathToBinary(const char *name) {
const char *path = GetEnv("PATH");
if (!path)
return nullptr;
uptr name_len = internal_strlen(name);
InternalScopedBuffer<char> buffer(kMaxPathLength);
const char *beg = path;
while (true) {
const char *end = internal_strchrnul(beg, kPathSeparator);
uptr prefix_len = end - beg;
if (prefix_len + name_len + 2 <= kMaxPathLength) {
internal_memcpy(buffer.data(), beg, prefix_len);
buffer[prefix_len] = '/';
internal_memcpy(&buffer[prefix_len + 1], name, name_len);
buffer[prefix_len + 1 + name_len] = '\0';
if (FileExists(buffer.data()))
return internal_strdup(buffer.data());
}
if (*end == '\0') break;
beg = end + 1;
}
return nullptr;
}
static char binary_name_cache_str[kMaxPathLength];
static char process_name_cache_str[kMaxPathLength];
const char *GetProcessName() {
return process_name_cache_str;
}
static uptr ReadProcessName(/*out*/ char *buf, uptr buf_len) {
ReadLongProcessName(buf, buf_len);
char *s = const_cast<char *>(StripModuleName(buf));
uptr len = internal_strlen(s);
if (s != buf) {
internal_memmove(buf, s, len);
buf[len] = '\0';
}
return len;
}
void UpdateProcessName() {
ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
}
// Call once to make sure that binary_name_cache_str is initialized
void CacheBinaryName() {
if (binary_name_cache_str[0] != '\0')
return;
ReadBinaryName(binary_name_cache_str, sizeof(binary_name_cache_str));
ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
}
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
CacheBinaryName();
uptr name_len = internal_strlen(binary_name_cache_str);
name_len = (name_len < buf_len - 1) ? name_len : buf_len - 1;
if (buf_len == 0)
return 0;
internal_memcpy(buf, binary_name_cache_str, name_len);
buf[name_len] = '\0';
return name_len;
}
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT using namespace __sanitizer; // NOLINT
extern "C" { extern "C" {
void __sanitizer_set_report_path(const char *path) { void __sanitizer_set_report_path(const char *path) {
if (!path) report_file.SetReportPath(path);
return;
uptr len = internal_strlen(path);
if (len > sizeof(report_path_prefix) - 100) {
Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
path[0], path[1], path[2], path[3],
path[4], path[5], path[6], path[7]);
Die();
}
if (report_fd != kStdoutFd &&
report_fd != kStderrFd &&
report_fd != kInvalidFd)
internal_close(report_fd);
report_fd = kInvalidFd;
log_to_file = false;
if (internal_strcmp(path, "stdout") == 0) {
report_fd = kStdoutFd;
} else if (internal_strcmp(path, "stderr") == 0) {
report_fd = kStderrFd;
} else {
internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
report_path_prefix[len] = '\0';
log_to_file = true;
}
} }
void __sanitizer_report_error_summary(const char *error_summary) { void __sanitizer_report_error_summary(const char *error_summary) {
Printf("%s\n", error_summary); Printf("%s\n", error_summary);
} }
} // extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_death_callback(void (*callback)(void)) {
SetUserDieCallback(callback);
}
} // extern "C"

View File

@ -5,8 +5,8 @@
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// //
// This file is shared between AddressSanitizer and ThreadSanitizer // This file is shared between run-time libraries of sanitizers.
// run-time libraries. //
// It declares common functions and classes that are used in both runtimes. // It declares common functions and classes that are used in both runtimes.
// Implementation of some functions are provided in sanitizer_common, while // Implementation of some functions are provided in sanitizer_common, while
// others must be defined by run-time library itself. // others must be defined by run-time library itself.
@ -14,13 +14,21 @@
#ifndef SANITIZER_COMMON_H #ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H #define SANITIZER_COMMON_H
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h" #include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h" #include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h" #include "sanitizer_mutex.h"
#include "sanitizer_flags.h"
#ifdef _MSC_VER
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif
namespace __sanitizer { namespace __sanitizer {
struct StackTrace; struct StackTrace;
struct AddressInfo;
// Constants. // Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8; const uptr kWordSize = SANITIZER_WORDSIZE / 8;
@ -32,12 +40,27 @@ const uptr kWordSizeInBits = 8 * kWordSize;
const uptr kCacheLineSize = 64; const uptr kCacheLineSize = 64;
#endif #endif
const uptr kMaxPathLength = 512; const uptr kMaxPathLength = 4096;
// 16K loaded modules should be enough for everyone.
static const uptr kMaxNumberOfModules = 1 << 14;
const uptr kMaxThreadStackSize = 1 << 30; // 1Gb const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const u64 kExternalPCBit = 1ULL << 60;
extern const char *SanitizerToolName; // Can be changed by the tool. extern const char *SanitizerToolName; // Can be changed by the tool.
extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
return atomic_load(&current_verbosity, memory_order_relaxed);
}
uptr GetPageSize(); uptr GetPageSize();
uptr GetPageSizeCached(); uptr GetPageSizeCached();
uptr GetMmapGranularity(); uptr GetMmapGranularity();
@ -53,17 +76,27 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
// Memory management // Memory management
void *MmapOrDie(uptr size, const char *mem_type); void *MmapOrDie(uptr size, const char *mem_type);
void UnmapOrDie(void *addr, uptr size); void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size); void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type); void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size); void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *Mprotect(uptr fixed_addr, uptr size); void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
// Map aligned chunk of address space; size and alignment are powers of two. // Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type); void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Disallow access to a memory range. Use MmapNoAccess to allocate an
// unaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
// Used to check if we can map shadow memory to a fixed location. // Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end); bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size); void FlushUnneededShadowMemory(uptr addr, uptr size);
void IncreaseTotalMmap(uptr size); void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size); void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
// InternalScopedBuffer can be used instead of large stack arrays to // InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low. // keep frame size low.
@ -126,44 +159,93 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
// IO // IO
void RawWrite(const char *buffer); void RawWrite(const char *buffer);
bool PrintsToTty();
// Caching version of PrintsToTty(). Not thread-safe.
bool PrintsToTtyCached();
bool ColorizeReports(); bool ColorizeReports();
void Printf(const char *format, ...); void Printf(const char *format, ...);
void Report(const char *format, ...); void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *)); void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...) \ #define VReport(level, ...) \
do { \ do { \
if ((uptr)common_flags()->verbosity >= (level)) Report(__VA_ARGS__); \ if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
} while (0) } while (0)
#define VPrintf(level, ...) \ #define VPrintf(level, ...) \
do { \ do { \
if ((uptr)common_flags()->verbosity >= (level)) Printf(__VA_ARGS__); \ if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
} while (0) } while (0)
// Can be used to prevent mixing error reports from different sanitizers. // Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex; extern StaticSpinMutex CommonSanitizerReportMutex;
void MaybeOpenReportFile();
extern fd_t report_fd; struct ReportFile {
extern bool log_to_file; void Write(const char *buffer, uptr length);
extern char report_path_prefix[4096]; bool SupportsColors();
extern uptr report_fd_pid; void SetReportPath(const char *path);
// Don't use fields directly. They are only declared public to allow
// aggregate initialization.
// Protects fields below.
StaticSpinMutex *mu;
// Opened file descriptor. Defaults to stderr. It may be equal to
// kInvalidFd, in which case new file will be opened when necessary.
fd_t fd;
// Path prefix of report file, set via __sanitizer_set_report_path.
char path_prefix[kMaxPathLength];
// Full path to report, obtained as <path_prefix>.PID
char full_path[kMaxPathLength];
// PID of the process that opened fd. If a fork() occurs,
// the PID of child will be different from fd_pid.
uptr fd_pid;
private:
void ReopenIfNecessary();
};
extern ReportFile report_file;
extern uptr stoptheworld_tracer_pid; extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid; extern uptr stoptheworld_tracer_ppid;
uptr OpenFile(const char *filename, bool write); enum FileAccessMode {
RdOnly,
WrOnly,
RdWr
};
// Returns kInvalidFd on error.
fd_t OpenFile(const char *filename, FileAccessMode mode,
error_t *errno_p = nullptr);
void CloseFile(fd_t);
// Return true on success, false on error.
bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
uptr *bytes_read = nullptr, error_t *error_p = nullptr);
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
uptr *bytes_written = nullptr, error_t *error_p = nullptr);
bool RenameFile(const char *oldpath, const char *newpath,
error_t *error_p = nullptr);
// Scoped file handle closer.
struct FileCloser {
explicit FileCloser(fd_t fd) : fd(fd) {}
~FileCloser() { CloseFile(fd); }
fd_t fd;
};
bool SupportsColoredOutput(fd_t fd);
// Opens the file 'file_name" and reads up to 'max_len' bytes. // Opens the file 'file_name" and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'. // The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size', // The size of the mmaped region is stored in '*buff_size'.
// Returns the number of read bytes or 0 if file can not be opened. // The total number of read bytes is stored in '*read_len'.
uptr ReadFileToBuffer(const char *file_name, char **buff, // Returns true if file was successfully opened and read.
uptr *buff_size, uptr max_len); bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len = 1 << 26,
error_t *errno_p = nullptr);
// Maps given file to virtual memory, and returns pointer to it // Maps given file to virtual memory, and returns pointer to it
// (or NULL if the mapping failes). Stores the size of mmaped region // (or NULL if mapping fails). Stores the size of mmaped region
// in '*buff_size'. // in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size); void *MapFileToMemory(const char *file_name, uptr *buff_size);
void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset); void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);
bool IsAccessibleMemoryRange(uptr beg, uptr size); bool IsAccessibleMemoryRange(uptr beg, uptr size);
@ -174,6 +256,12 @@ const char *StripPathPrefix(const char *filepath,
const char *StripModuleName(const char *module); const char *StripModuleName(const char *module);
// OS // OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary(); void DisableCoreDumperIfNecessary();
void DumpProcessMap(); void DumpProcessMap();
bool FileExists(const char *filename); bool FileExists(const char *filename);
@ -181,6 +269,9 @@ const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value); bool SetEnv(const char *name, const char *value);
const char *GetPwd(); const char *GetPwd();
char *FindPathToBinary(const char *name); char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
u32 GetUid(); u32 GetUid();
void ReExec(); void ReExec();
bool StackSizeIsUnlimited(); bool StackSizeIsUnlimited();
@ -192,10 +283,13 @@ void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args); void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)()); void SetSandboxingCallback(void (*f)());
void CovUpdateMapping(uptr caller_pc = 0); void CoverageUpdateMapping();
void CovBeforeFork(); void CovBeforeFork();
void CovAfterFork(int child_pid); void CovAfterFork(int child_pid);
void InitializeCoverage(bool enabled, const char *coverage_dir);
void ReInitializeCoverage(bool enabled, const char *coverage_dir);
void InitTlsSize(); void InitTlsSize();
uptr GetTlsSize(); uptr GetTlsSize();
@ -205,12 +299,15 @@ void SleepForMillis(int millis);
u64 NanoTime(); u64 NanoTime();
int Atexit(void (*function)(void)); int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size); void SortArray(uptr *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);
// Exit // Exit
void NORETURN Abort(); void NORETURN Abort();
void NORETURN Die(); void NORETURN Die();
void NORETURN void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
error_t err);
// Set the name of the current thread to 'name', return true on succees. // Set the name of the current thread to 'name', return true on succees.
// The name may be truncated to a system-dependent limit. // The name may be truncated to a system-dependent limit.
@ -222,12 +319,26 @@ bool SanitizerGetThreadName(char *name, int max_len);
// Specific tools may override behavior of "Die" and "CheckFailed" functions // Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job. // to do tool-specific job.
typedef void (*DieCallbackType)(void); typedef void (*DieCallbackType)(void);
void SetDieCallback(DieCallbackType);
DieCallbackType GetDieCallback(); // It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks will be run in the opposite order. The tools are
// strongly recommended to setup all callbacks during initialization, when there
// is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);
void SetUserDieCallback(DieCallbackType callback);
typedef void (*CheckFailedCallbackType)(const char *, int, const char *, typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64); u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback); void SetCheckFailedCallback(CheckFailedCallbackType callback);
// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
// Functions related to signal handling. // Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *); typedef void (*SignalHandlerType)(int, void *, void *);
bool IsDeadlySignal(int signum); bool IsDeadlySignal(int signum);
@ -243,9 +354,9 @@ const int kMaxSummaryLength = 1024;
// and pass it to __sanitizer_report_error_summary. // and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message); void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as: // Same as above, but construct error_message as:
// error_type file:line function // error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const char *file, void ReportErrorSummary(const char *error_type, const AddressInfo &info);
int line, const char *function); // Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, StackTrace *trace); void ReportErrorSummary(const char *error_type, StackTrace *trace);
// Math // Math
@ -264,7 +375,11 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U); CHECK_NE(x, 0U);
unsigned long up; // NOLINT unsigned long up; // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x); up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64) #elif defined(_WIN64)
_BitScanReverse64(&up, x); _BitScanReverse64(&up, x);
#else #else
@ -277,7 +392,11 @@ INLINE uptr LeastSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U); CHECK_NE(x, 0U);
unsigned long up; // NOLINT unsigned long up; // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
up = __builtin_ctzll(x);
# else
up = __builtin_ctzl(x); up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64) #elif defined(_WIN64)
_BitScanForward64(&up, x); _BitScanForward64(&up, x);
#else #else
@ -297,7 +416,7 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
uptr up = MostSignificantSetBitIndex(size); uptr up = MostSignificantSetBitIndex(size);
CHECK(size < (1ULL << (up + 1))); CHECK(size < (1ULL << (up + 1)));
CHECK(size > (1ULL << up)); CHECK(size > (1ULL << up));
return 1UL << (up + 1); return 1ULL << (up + 1);
} }
INLINE uptr RoundUpTo(uptr size, uptr boundary) { INLINE uptr RoundUpTo(uptr size, uptr boundary) {
@ -315,17 +434,7 @@ INLINE bool IsAligned(uptr a, uptr alignment) {
INLINE uptr Log2(uptr x) { INLINE uptr Log2(uptr x) {
CHECK(IsPowerOfTwo(x)); CHECK(IsPowerOfTwo(x));
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) return LeastSignificantSetBitIndex(x);
return __builtin_ctzl(x);
#elif defined(_WIN64)
unsigned long ret; // NOLINT
_BitScanForward64(&ret, x);
return ret;
#else
unsigned long ret; // NOLINT
_BitScanForward(&ret, x);
return ret;
#endif
} }
// Don't use std::min, std::max or std::swap, to minimize dependency // Don't use std::min, std::max or std::swap, to minimize dependency
@ -354,14 +463,14 @@ INLINE int ToLower(int c) {
// small vectors. // small vectors.
// WARNING: The current implementation supports only POD types. // WARNING: The current implementation supports only POD types.
template<typename T> template<typename T>
class InternalMmapVector { class InternalMmapVectorNoCtor {
public: public:
explicit InternalMmapVector(uptr initial_capacity) { void Initialize(uptr initial_capacity) {
capacity_ = Max(initial_capacity, (uptr)1); capacity_ = Max(initial_capacity, (uptr)1);
size_ = 0; size_ = 0;
data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector"); data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
} }
~InternalMmapVector() { void Destroy() {
UnmapOrDie(data_, capacity_ * sizeof(T)); UnmapOrDie(data_, capacity_ * sizeof(T));
} }
T &operator[](uptr i) { T &operator[](uptr i) {
@ -394,11 +503,15 @@ class InternalMmapVector {
const T *data() const { const T *data() const {
return data_; return data_;
} }
T *data() {
return data_;
}
uptr capacity() const { uptr capacity() const {
return capacity_; return capacity_;
} }
void clear() { size_ = 0; } void clear() { size_ = 0; }
bool empty() const { return size() == 0; }
private: private:
void Resize(uptr new_capacity) { void Resize(uptr new_capacity) {
@ -412,15 +525,24 @@ class InternalMmapVector {
UnmapOrDie(old_data, capacity_ * sizeof(T)); UnmapOrDie(old_data, capacity_ * sizeof(T));
capacity_ = new_capacity; capacity_ = new_capacity;
} }
// Disallow evil constructors.
InternalMmapVector(const InternalMmapVector&);
void operator=(const InternalMmapVector&);
T *data_; T *data_;
uptr capacity_; uptr capacity_;
uptr size_; uptr size_;
}; };
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
public:
explicit InternalMmapVector(uptr initial_capacity) {
InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
}
~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
// Disallow evil constructors.
InternalMmapVector(const InternalMmapVector&);
void operator=(const InternalMmapVector&);
};
// HeapSort for arrays and InternalMmapVector. // HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare> template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) { void InternalSort(Container *v, uptr size, Compare comp) {
@ -478,29 +600,32 @@ uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
// executable or a shared object). // executable or a shared object).
class LoadedModule { class LoadedModule {
public: public:
LoadedModule(const char *module_name, uptr base_address); LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
void set(const char *module_name, uptr base_address);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable); void addAddressRange(uptr beg, uptr end, bool executable);
bool containsAddress(uptr address) const; bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; } const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; } uptr base_address() const { return base_address_; }
uptr n_ranges() const { return n_ranges_; }
uptr address_range_start(int i) const { return ranges_[i].beg; }
uptr address_range_end(int i) const { return ranges_[i].end; }
bool address_range_executable(int i) const { return exec_[i]; }
private:
struct AddressRange { struct AddressRange {
AddressRange *next;
uptr beg; uptr beg;
uptr end; uptr end;
bool executable;
AddressRange(uptr beg, uptr end, bool executable)
: next(nullptr), beg(beg), end(end), executable(executable) {}
}; };
char *full_name_;
typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
Iterator ranges() const { return Iterator(&ranges_); }
private:
char *full_name_; // Owned.
uptr base_address_; uptr base_address_;
static const uptr kMaxNumberOfAddressRanges = 6; IntrusiveList<AddressRange> ranges_;
AddressRange ranges_[kMaxNumberOfAddressRanges];
bool exec_[kMaxNumberOfAddressRanges];
uptr n_ranges_;
}; };
// OS-dependent function that fills array with descriptions of at most // OS-dependent function that fills array with descriptions of at most
@ -511,45 +636,80 @@ typedef bool (*string_predicate_t)(const char *);
uptr GetListOfModules(LoadedModule *modules, uptr max_modules, uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
string_predicate_t filter); string_predicate_t filter);
#if SANITIZER_POSIX
const uptr kPthreadDestructorIterations = 4;
#else
// Unused on Windows.
const uptr kPthreadDestructorIterations = 0;
#endif
// Callback type for iterating over a set of memory ranges. // Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg); typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !defined(SANITIZER_GO) enum AndroidApiLevel {
extern uptr indirect_call_wrapper; ANDROID_NOT_ANDROID = 0,
void SetIndirectCallWrapper(uptr wrapper); ANDROID_KITKAT = 19,
ANDROID_LOLLIPOP_MR1 = 22,
ANDROID_POST_LOLLIPOP = 23
};
template <typename F> #if SANITIZER_LINUX
F IndirectExternCall(F f) { // Initialize Android logging. Any writes before this are silently lost.
typedef F (*WrapF)(F); void AndroidLogInit();
return indirect_call_wrapper ? ((WrapF)indirect_call_wrapper)(f) : f; void WriteToSyslog(const char *buffer);
}
#else #else
INLINE void SetIndirectCallWrapper(uptr wrapper) {} INLINE void AndroidLogInit() {}
template <typename F> INLINE void WriteToSyslog(const char *buffer) {}
F IndirectExternCall(F f) {
return f;
}
#endif #endif
#if SANITIZER_ANDROID #if SANITIZER_ANDROID
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void AndroidLogWrite(const char *buffer);
void GetExtraActivationFlags(char *buf, uptr size); void GetExtraActivationFlags(char *buf, uptr size);
void SanitizerInitializeUnwinder(); void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else #else
INLINE void AndroidLogInit() {}
INLINE void AndroidLogWrite(const char *buffer_unused) {} INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; } INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
INLINE void SanitizerInitializeUnwinder() {} INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif #endif
INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
return 4;
#else
// Unused on Windows.
return 0;
#endif
}
void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if _MSC_VER && !defined(__clang__)
_ReadWriteBarrier();
#else
__asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
struct SignalContext {
void *context;
uptr addr;
uptr pc;
uptr sp;
uptr bp;
SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) :
context(context), addr(addr), pc(pc), sp(sp), bp(bp) {
}
// Creates signal context in a platform-specific manner.
static SignalContext Create(void *siginfo, void *context);
};
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
} // namespace __sanitizer } // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size, inline void *operator new(__sanitizer::operator_new_size_type size,

View File

@ -11,6 +11,7 @@
// with a few common GNU extensions. // with a few common GNU extensions.
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include <stdarg.h> #include <stdarg.h>
static const char *parse_number(const char *p, int *out) { static const char *parse_number(const char *p, int *out) {
@ -189,7 +190,7 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
continue; continue;
} }
if (*p == '\0') { if (*p == '\0') {
return 0; return nullptr;
} }
// %n$ // %n$
p = maybe_parse_param_index(p, &dir->argIdx); p = maybe_parse_param_index(p, &dir->argIdx);
@ -204,7 +205,7 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
p = parse_number(p, &dir->fieldWidth); p = parse_number(p, &dir->fieldWidth);
CHECK(p); CHECK(p);
if (dir->fieldWidth <= 0) // Width if at all must be non-zero if (dir->fieldWidth <= 0) // Width if at all must be non-zero
return 0; return nullptr;
} }
// m // m
if (*p == 'm') { if (*p == 'm') {
@ -224,8 +225,8 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
while (*p && *p != ']') while (*p && *p != ']')
++p; ++p;
if (*p == 0) if (*p == 0)
return 0; // unexpected end of string return nullptr; // unexpected end of string
// Consume the closing ']'. // Consume the closing ']'.
++p; ++p;
} }
// This is unfortunately ambiguous between old GNU extension // This is unfortunately ambiguous between old GNU extension
@ -249,7 +250,7 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
while (*q && *q != ']' && *q != '%') while (*q && *q != ']' && *q != '%')
++q; ++q;
if (*q == 0 || *q == '%') if (*q == 0 || *q == '%')
return 0; return nullptr;
p = q + 1; // Consume the closing ']'. p = q + 1; // Consume the closing ']'.
dir->maybeGnuMalloc = true; dir->maybeGnuMalloc = true;
} }
@ -393,7 +394,7 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
continue; continue;
} }
if (*p == '\0') { if (*p == '\0') {
return 0; return nullptr;
} }
// %n$ // %n$
p = maybe_parse_param_index(p, &dir->precisionIdx); p = maybe_parse_param_index(p, &dir->precisionIdx);
@ -406,7 +407,7 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
p = maybe_parse_number_or_star(p, &dir->fieldWidth, p = maybe_parse_number_or_star(p, &dir->fieldWidth,
&dir->starredWidth); &dir->starredWidth);
if (!p) if (!p)
return 0; return nullptr;
// Precision // Precision
if (*p == '.') { if (*p == '.') {
++p; ++p;
@ -414,7 +415,7 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
p = maybe_parse_number_or_star(p, &dir->fieldPrecision, p = maybe_parse_number_or_star(p, &dir->fieldPrecision,
&dir->starredPrecision); &dir->starredPrecision);
if (!p) if (!p)
return 0; return nullptr;
// m$ // m$
if (dir->starredPrecision) { if (dir->starredPrecision) {
p = maybe_parse_param_index(p, &dir->precisionIdx); p = maybe_parse_param_index(p, &dir->precisionIdx);
@ -554,4 +555,4 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
} }
} }
#endif // SANITIZER_INTERCEPT_PRINTF #endif // SANITIZER_INTERCEPT_PRINTF

View File

@ -518,7 +518,7 @@ static const ioctl_desc *ioctl_table_lookup(unsigned req) {
if (left == right && ioctl_table[left].req == req) if (left == right && ioctl_table[left].req == req)
return ioctl_table + left; return ioctl_table + left;
else else
return 0; return nullptr;
} }
static bool ioctl_decode(unsigned req, ioctl_desc *desc) { static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
@ -565,7 +565,7 @@ static const ioctl_desc *ioctl_lookup(unsigned req) {
(desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE || (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
desc->type == ioctl_desc::READ)) desc->type == ioctl_desc::READ))
return desc; return desc;
return 0; return nullptr;
} }
static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d, static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
@ -576,14 +576,10 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
} }
if (desc->type != ioctl_desc::CUSTOM) if (desc->type != ioctl_desc::CUSTOM)
return; return;
switch (request) { if (request == IOCTL_SIOCGIFCONF) {
case 0x00008912: { // SIOCGIFCONF struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg; COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
break;
}
} }
return;
} }
static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d, static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
@ -595,12 +591,8 @@ static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
} }
if (desc->type != ioctl_desc::CUSTOM) if (desc->type != ioctl_desc::CUSTOM)
return; return;
switch (request) { if (request == IOCTL_SIOCGIFCONF) {
case 0x00008912: { // SIOCGIFCONF struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg; COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
break;
}
} }
return;
} }

View File

@ -11,35 +11,31 @@
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_flags.h" #include "sanitizer_flags.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h" #include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h" #include "sanitizer_symbolizer.h"
#if SANITIZER_POSIX
#include "sanitizer_posix.h"
#endif
namespace __sanitizer { namespace __sanitizer {
bool PrintsToTty() { bool ReportFile::SupportsColors() {
MaybeOpenReportFile(); SpinMutexLock l(mu);
return internal_isatty(report_fd) != 0; ReopenIfNecessary();
} return SupportsColoredOutput(fd);
bool PrintsToTtyCached() {
// FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
// printing on Windows.
if (SANITIZER_WINDOWS)
return 0;
static int cached = 0;
static bool prints_to_tty;
if (!cached) { // Not thread-safe.
prints_to_tty = PrintsToTty();
cached = 1;
}
return prints_to_tty;
} }
bool ColorizeReports() { bool ColorizeReports() {
// FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
// printing on Windows.
if (SANITIZER_WINDOWS)
return false;
const char *flag = common_flags()->color; const char *flag = common_flags()->color;
return internal_strcmp(flag, "always") == 0 || return internal_strcmp(flag, "always") == 0 ||
(internal_strcmp(flag, "auto") == 0 && PrintsToTtyCached()); (internal_strcmp(flag, "auto") == 0 && report_file.SupportsColors());
} }
static void (*sandboxing_callback)(); static void (*sandboxing_callback)();
@ -50,16 +46,82 @@ void SetSandboxingCallback(void (*f)()) {
void ReportErrorSummary(const char *error_type, StackTrace *stack) { void ReportErrorSummary(const char *error_type, StackTrace *stack) {
if (!common_flags()->print_summary) if (!common_flags()->print_summary)
return; return;
AddressInfo ai; if (stack->size == 0) {
#if !SANITIZER_GO ReportErrorSummary(error_type);
if (stack->size > 0 && Symbolizer::GetOrInit()->CanReturnFileLineInfo()) { return;
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
Symbolizer::GetOrInit()->SymbolizePC(pc, &ai, 1);
} }
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
ReportErrorSummary(error_type, frame->info);
frame->ClearAll();
}
static void (*SoftRssLimitExceededCallback)(bool exceeded);
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
SoftRssLimitExceededCallback = Callback;
}
void BackgroundThread(void *arg) {
uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
uptr prev_reported_rss = 0;
uptr prev_reported_stack_depot_size = 0;
bool reached_soft_rss_limit = false;
while (true) {
SleepForMillis(100);
uptr current_rss_mb = GetRSS() >> 20;
if (Verbosity()) {
// If RSS has grown 10% since last time, print some information.
if (prev_reported_rss * 11 / 10 < current_rss_mb) {
Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
prev_reported_rss = current_rss_mb;
}
// If stack depot has grown 10% since last time, print it too.
StackDepotStats *stack_depot_stats = StackDepotGetStats();
if (prev_reported_stack_depot_size * 11 / 10 <
stack_depot_stats->allocated) {
Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
SanitizerToolName,
stack_depot_stats->n_uniq_ids,
stack_depot_stats->allocated >> 20);
prev_reported_stack_depot_size = stack_depot_stats->allocated;
}
}
// Check RSS against the limit.
if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
DumpProcessMap();
Die();
}
if (soft_rss_limit_mb) {
if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
reached_soft_rss_limit = true;
Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
if (SoftRssLimitExceededCallback)
SoftRssLimitExceededCallback(true);
} else if (soft_rss_limit_mb >= current_rss_mb &&
reached_soft_rss_limit) {
reached_soft_rss_limit = false;
if (SoftRssLimitExceededCallback)
SoftRssLimitExceededCallback(false);
}
}
}
}
void MaybeStartBackgroudThread() {
#if SANITIZER_LINUX // Need to implement/test on other platforms.
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
!common_flags()->soft_rss_limit_mb) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
#endif #endif
ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
} }
} // namespace __sanitizer } // namespace __sanitizer

View File

@ -2297,7 +2297,9 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {} POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64)) #if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__))
if (data) { if (data) {
if (request == ptrace_setregs) { if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz); PRE_READ((void *)data, struct_user_regs_struct_sz);
@ -2316,7 +2318,9 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
} }
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) { POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64)) #if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__))
if (res >= 0 && data) { if (res >= 0 && data) {
// Note that this is different from the interceptor in // Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc. // sanitizer_common_interceptors.inc.

View File

@ -10,18 +10,24 @@
// //
// Compiler instrumentation: // Compiler instrumentation:
// For every interesting basic block the compiler injects the following code: // For every interesting basic block the compiler injects the following code:
// if (*Guard) { // if (Guard < 0) {
// __sanitizer_cov(); // __sanitizer_cov(&Guard);
// *Guard = 1;
// } // }
// At the module start up time __sanitizer_cov_module_init sets the guards
// to consecutive negative numbers (-1, -2, -3, ...).
// It's fine to call __sanitizer_cov more than once for a given block. // It's fine to call __sanitizer_cov more than once for a given block.
// //
// Run-time: // Run-time:
// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC). // - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
// and atomically set Guard to -Guard.
// - __sanitizer_cov_dump: dump the coverage data to disk. // - __sanitizer_cov_dump: dump the coverage data to disk.
// For every module of the current process that has coverage data // For every module of the current process that has coverage data
// this will create a file module_name.PID.sancov. The file format is simple: // this will create a file module_name.PID.sancov.
// it's just a sorted sequence of 4-byte offsets in the module. //
// The file format is simple: the first 8 bytes is the magic,
// one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the
// magic defines the size of the following offsets.
// The rest of the data is the offsets in the module.
// //
// Eventually, this coverage implementation should be obsoleted by a more // Eventually, this coverage implementation should be obsoleted by a more
// powerful general purpose Clang/LLVM coverage instrumentation. // powerful general purpose Clang/LLVM coverage instrumentation.
@ -39,7 +45,12 @@
#include "sanitizer_symbolizer.h" #include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h" #include "sanitizer_flags.h"
atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once. static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
static atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
static atomic_uintptr_t coverage_counter;
// pc_array is the array containing the covered PCs. // pc_array is the array containing the covered PCs.
// To make the pc_array thread- and async-signal-safe it has to be large enough. // To make the pc_array thread- and async-signal-safe it has to be large enough.
@ -50,29 +61,55 @@ atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
// dump current memory layout to another file. // dump current memory layout to another file.
static bool cov_sandboxed = false; static bool cov_sandboxed = false;
static int cov_fd = kInvalidFd; static fd_t cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0; static unsigned int cov_max_block_size = 0;
static bool coverage_enabled = false;
static const char *coverage_dir;
namespace __sanitizer { namespace __sanitizer {
class CoverageData { class CoverageData {
public: public:
void Init(); void Init();
void Enable();
void Disable();
void ReInit();
void BeforeFork(); void BeforeFork();
void AfterFork(int child_pid); void AfterFork(int child_pid);
void Extend(uptr npcs); void Extend(uptr npcs);
void Add(uptr pc); void Add(uptr pc, u32 *guard);
void IndirCall(uptr caller, uptr callee, uptr callee_cache[], void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
uptr cache_size); uptr cache_size);
void DumpCallerCalleePairs(); void DumpCallerCalleePairs();
void DumpTrace();
void DumpAsBitSet();
void DumpCounters();
void DumpOffsets();
void DumpAll();
ALWAYS_INLINE
void TraceBasicBlock(s32 *id);
void InitializeGuardArray(s32 *guards);
void InitializeGuards(s32 *guards, uptr n, const char *module_name,
uptr caller_pc);
void InitializeCounters(u8 *counters, uptr n);
void ReinitializeGuards();
uptr GetNumberOf8bitCounters();
uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);
uptr *data(); uptr *data();
uptr size(); uptr size();
private: private:
void DirectOpen();
void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);
// Maximal size pc array may ever grow. // Maximal size pc array may ever grow.
// We MmapNoReserve this space to ensure that the array is contiguous. // We MmapNoReserve this space to ensure that the array is contiguous.
static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27); static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
1 << 27);
// The amount file mapping for the pc array is grown by. // The amount file mapping for the pc array is grown by.
static const uptr kPcArrayMmapSize = 64 * 1024; static const uptr kPcArrayMmapSize = 64 * 1024;
@ -86,7 +123,27 @@ class CoverageData {
// Current file mapped size of the pc array. // Current file mapped size of the pc array.
uptr pc_array_mapped_size; uptr pc_array_mapped_size;
// Descriptor of the file mapped pc array. // Descriptor of the file mapped pc array.
int pc_fd; fd_t pc_fd;
// Vector of coverage guard arrays, protected by mu.
InternalMmapVectorNoCtor<s32*> guard_array_vec;
struct NamedPcRange {
const char *copied_module_name;
uptr beg, end; // elements [beg,end) in pc_array.
};
// Vector of module and compilation unit pc ranges.
InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;
struct CounterAndSize {
u8 *counters;
uptr n;
};
InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
uptr num_8bit_counters;
// Caller-Callee (cc) array, size and current index. // Caller-Callee (cc) array, size and current index.
static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24); static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
@ -94,59 +151,131 @@ class CoverageData {
atomic_uintptr_t cc_array_index; atomic_uintptr_t cc_array_index;
atomic_uintptr_t cc_array_size; atomic_uintptr_t cc_array_size;
// Tracing event array, size and current pointer.
// We record all events (basic block entries) in a global buffer of u32
// values. Each such value is the index in pc_array.
// So far the tracing is highly experimental:
// - not thread-safe;
// - does not support long traces;
// - not tuned for performance.
static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
u32 *tr_event_array;
uptr tr_event_array_size;
u32 *tr_event_pointer;
static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
StaticSpinMutex mu; StaticSpinMutex mu;
void DirectOpen();
void ReInit();
}; };
static CoverageData coverage_data; static CoverageData coverage_data;
void CovUpdateMapping(const char *path, uptr caller_pc = 0);
void CoverageData::DirectOpen() { void CoverageData::DirectOpen() {
InternalScopedString path(1024); InternalScopedString path(kMaxPathLength);
internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw", internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
common_flags()->coverage_dir, internal_getpid()); coverage_dir, internal_getpid());
pc_fd = OpenFile(path.data(), true); pc_fd = OpenFile(path.data(), RdWr);
if (internal_iserror(pc_fd)) { if (pc_fd == kInvalidFd) {
Report(" Coverage: failed to open %s for writing\n", path.data()); Report("Coverage: failed to open %s for reading/writing\n", path.data());
Die(); Die();
} }
pc_array_mapped_size = 0; pc_array_mapped_size = 0;
CovUpdateMapping(); CovUpdateMapping(coverage_dir);
} }
void CoverageData::Init() { void CoverageData::Init() {
pc_fd = kInvalidFd;
}
void CoverageData::Enable() {
if (pc_array)
return;
pc_array = reinterpret_cast<uptr *>( pc_array = reinterpret_cast<uptr *>(
MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit")); MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
pc_fd = kInvalidFd; atomic_store(&pc_array_index, 0, memory_order_relaxed);
if (common_flags()->coverage_direct) { if (common_flags()->coverage_direct) {
atomic_store(&pc_array_size, 0, memory_order_relaxed); atomic_store(&pc_array_size, 0, memory_order_relaxed);
atomic_store(&pc_array_index, 0, memory_order_relaxed);
} else { } else {
atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed); atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
atomic_store(&pc_array_index, 0, memory_order_relaxed);
} }
cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie( cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array")); sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed); atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
atomic_store(&cc_array_index, 0, memory_order_relaxed); atomic_store(&cc_array_index, 0, memory_order_relaxed);
// Allocate tr_event_array with a guard page at the end.
tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
"CovInit::tr_event_array"));
MprotectNoAccess(
reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
GetMmapGranularity());
tr_event_array_size = kTrEventArrayMaxSize;
tr_event_pointer = tr_event_array;
num_8bit_counters = 0;
}
void CoverageData::InitializeGuardArray(s32 *guards) {
Enable(); // Make sure coverage is enabled at this point.
s32 n = guards[0];
for (s32 j = 1; j <= n; j++) {
uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
guards[j] = -static_cast<s32>(idx + 1);
}
}
void CoverageData::Disable() {
if (pc_array) {
UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
pc_array = nullptr;
}
if (cc_array) {
UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
cc_array = nullptr;
}
if (tr_event_array) {
UnmapOrDie(tr_event_array,
sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
GetMmapGranularity());
tr_event_array = nullptr;
tr_event_pointer = nullptr;
}
if (pc_fd != kInvalidFd) {
CloseFile(pc_fd);
pc_fd = kInvalidFd;
}
}
void CoverageData::ReinitializeGuards() {
// Assuming single thread.
atomic_store(&pc_array_index, 0, memory_order_relaxed);
for (uptr i = 0; i < guard_array_vec.size(); i++)
InitializeGuardArray(guard_array_vec[i]);
} }
void CoverageData::ReInit() { void CoverageData::ReInit() {
internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize); Disable();
if (pc_fd != kInvalidFd) internal_close(pc_fd); if (coverage_enabled) {
if (common_flags()->coverage_direct) { if (common_flags()->coverage_direct) {
// In memory-mapped mode we must extend the new file to the known array // In memory-mapped mode we must extend the new file to the known array
// size. // size.
uptr size = atomic_load(&pc_array_size, memory_order_relaxed); uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
Init(); uptr npcs = size / sizeof(uptr);
if (size) Extend(size); Enable();
} else { if (size) Extend(npcs);
Init(); if (coverage_enabled) CovUpdateMapping(coverage_dir);
} else {
Enable();
}
} }
// Re-initialize the guards.
// We are single-threaded now, no need to grab any lock.
CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
ReinitializeGuards();
} }
void CoverageData::BeforeFork() { void CoverageData::BeforeFork() {
@ -164,15 +293,16 @@ void CoverageData::Extend(uptr npcs) {
if (!common_flags()->coverage_direct) return; if (!common_flags()->coverage_direct) return;
SpinMutexLock l(&mu); SpinMutexLock l(&mu);
if (pc_fd == kInvalidFd) DirectOpen();
CHECK_NE(pc_fd, kInvalidFd);
uptr size = atomic_load(&pc_array_size, memory_order_relaxed); uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
size += npcs * sizeof(uptr); size += npcs * sizeof(uptr);
if (size > pc_array_mapped_size) { if (coverage_enabled && size > pc_array_mapped_size) {
if (pc_fd == kInvalidFd) DirectOpen();
CHECK_NE(pc_fd, kInvalidFd);
uptr new_mapped_size = pc_array_mapped_size; uptr new_mapped_size = pc_array_mapped_size;
while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize; while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);
// Extend the file and map the new space at the end of pc_array. // Extend the file and map the new space at the end of pc_array.
uptr res = internal_ftruncate(pc_fd, new_mapped_size); uptr res = internal_ftruncate(pc_fd, new_mapped_size);
@ -181,24 +311,100 @@ void CoverageData::Extend(uptr npcs) {
Printf("failed to extend raw coverage file: %d\n", err); Printf("failed to extend raw coverage file: %d\n", err);
Die(); Die();
} }
void *p = MapWritableFileToMemory(pc_array + pc_array_mapped_size,
uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
void *p = MapWritableFileToMemory((void *)next_map_base,
new_mapped_size - pc_array_mapped_size, new_mapped_size - pc_array_mapped_size,
pc_fd, pc_array_mapped_size); pc_fd, pc_array_mapped_size);
CHECK_EQ(p, pc_array + pc_array_mapped_size); CHECK_EQ((uptr)p, next_map_base);
pc_array_mapped_size = new_mapped_size; pc_array_mapped_size = new_mapped_size;
} }
atomic_store(&pc_array_size, size, memory_order_release); atomic_store(&pc_array_size, size, memory_order_release);
} }
// Simply add the pc into the vector under lock. If the function is called more void CoverageData::InitializeCounters(u8 *counters, uptr n) {
// than once for a given PC it will be inserted multiple times, which is fine. if (!counters) return;
void CoverageData::Add(uptr pc) { CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
n = RoundUpTo(n, 16); // The compiler must ensure that counters is 16-aligned.
SpinMutexLock l(&mu);
counters_vec.push_back({counters, n});
num_8bit_counters += n;
}
void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
uptr range_end) {
auto sym = Symbolizer::GetOrInit();
if (!sym)
return;
const char *module_name = sym->GetModuleNameForPc(caller_pc);
if (!module_name) return;
if (module_name_vec.empty() ||
module_name_vec.back().copied_module_name != module_name)
module_name_vec.push_back({module_name, range_beg, range_end});
else
module_name_vec.back().end = range_end;
}
void CoverageData::InitializeGuards(s32 *guards, uptr n,
const char *comp_unit_name,
uptr caller_pc) {
// The array 'guards' has n+1 elements, we use the element zero
// to store 'n'.
CHECK_LT(n, 1 << 30);
guards[0] = static_cast<s32>(n);
InitializeGuardArray(guards);
SpinMutexLock l(&mu);
uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
uptr range_beg = range_end - n;
comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
guard_array_vec.push_back(guards);
UpdateModuleNameVec(caller_pc, range_beg, range_end);
}
static const uptr kBundleCounterBits = 16;
// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64
// we insert the global counter into the first 16 bits of the PC.
uptr BundlePcAndCounter(uptr pc, uptr counter) {
if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
return pc;
static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1;
if (counter > kMaxCounter)
counter = kMaxCounter;
CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits));
return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits));
}
uptr UnbundlePc(uptr bundle) {
if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
return bundle;
return (bundle << kBundleCounterBits) >> kBundleCounterBits;
}
uptr UnbundleCounter(uptr bundle) {
if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
return 0;
return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits);
}
// If guard is negative, atomically set it to -guard and store the PC in
// pc_array.
void CoverageData::Add(uptr pc, u32 *guard) {
atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
if (guard_value >= 0) return;
atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
if (!pc_array) return; if (!pc_array) return;
uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
uptr idx = -guard_value - 1;
if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
return; // May happen after fork when pc_array_index becomes 0.
CHECK_LT(idx * sizeof(uptr), CHECK_LT(idx * sizeof(uptr),
atomic_load(&pc_array_size, memory_order_acquire)); atomic_load(&pc_array_size, memory_order_acquire));
pc_array[idx] = pc; uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
pc_array[idx] = BundlePcAndCounter(pc, counter);
} }
// Registers a pair caller=>callee. // Registers a pair caller=>callee.
@ -226,13 +432,73 @@ void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
for (uptr i = 2; i < cache_size; i++) { for (uptr i = 2; i < cache_size; i++) {
uptr was = 0; uptr was = 0;
if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee, if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
memory_order_seq_cst)) memory_order_seq_cst)) {
atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
return; return;
}
if (was == callee) // Already have this callee. if (was == callee) // Already have this callee.
return; return;
} }
} }
uptr CoverageData::GetNumberOf8bitCounters() {
return num_8bit_counters;
}
// Map every 8bit counter to a 8-bit bitset and clear the counter.
uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
uptr num_new_bits = 0;
uptr cur = 0;
// For better speed we map 8 counters to 8 bytes of bitset at once.
static const uptr kBatchSize = 8;
CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
u8 *c = counters_vec[i].counters;
uptr n = counters_vec[i].n;
CHECK_EQ(n % 16, 0);
CHECK_EQ(cur % kBatchSize, 0);
CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
if (!bitset) {
internal_bzero_aligned16(c, n);
cur += n;
continue;
}
for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
CHECK_LT(cur, num_8bit_counters);
u64 *pc64 = reinterpret_cast<u64*>(c + j);
u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
u64 c64 = *pc64;
u64 old_bits_64 = *pb64;
u64 new_bits_64 = old_bits_64;
if (c64) {
*pc64 = 0;
for (uptr k = 0; k < kBatchSize; k++) {
u64 x = (c64 >> (8 * k)) & 0xff;
if (x) {
u64 bit = 0;
/**/ if (x >= 128) bit = 128;
else if (x >= 32) bit = 64;
else if (x >= 16) bit = 32;
else if (x >= 8) bit = 16;
else if (x >= 4) bit = 8;
else if (x >= 3) bit = 4;
else if (x >= 2) bit = 2;
else if (x >= 1) bit = 1;
u64 mask = bit << (8 * k);
if (!(new_bits_64 & mask)) {
num_new_bits++;
new_bits_64 |= mask;
}
}
}
*pb64 = new_bits_64;
}
}
}
CHECK_EQ(cur, num_8bit_counters);
return num_new_bits;
}
uptr *CoverageData::data() { uptr *CoverageData::data() {
return pc_array; return pc_array;
} }
@ -251,15 +517,15 @@ struct CovHeader {
static void CovWritePacked(int pid, const char *module, const void *blob, static void CovWritePacked(int pid, const char *module, const void *blob,
unsigned int blob_size) { unsigned int blob_size) {
if (cov_fd < 0) return; if (cov_fd == kInvalidFd) return;
unsigned module_name_length = internal_strlen(module); unsigned module_name_length = internal_strlen(module);
CovHeader header = {pid, module_name_length, blob_size}; CovHeader header = {pid, module_name_length, blob_size};
if (cov_max_block_size == 0) { if (cov_max_block_size == 0) {
// Writing to a file. Just go ahead. // Writing to a file. Just go ahead.
internal_write(cov_fd, &header, sizeof(header)); WriteToFile(cov_fd, &header, sizeof(header));
internal_write(cov_fd, module, module_name_length); WriteToFile(cov_fd, module, module_name_length);
internal_write(cov_fd, blob, blob_size); WriteToFile(cov_fd, blob, blob_size);
} else { } else {
// Writing to a socket. We want to split the data into appropriately sized // Writing to a socket. We want to split the data into appropriately sized
// blocks. // blocks.
@ -275,15 +541,14 @@ static void CovWritePacked(int pid, const char *module, const void *blob,
internal_memcpy(block_pos, module, module_name_length); internal_memcpy(block_pos, module, module_name_length);
block_pos += module_name_length; block_pos += module_name_length;
char *block_data_begin = block_pos; char *block_data_begin = block_pos;
char *blob_pos = (char *)blob; const char *blob_pos = (const char *)blob;
while (blob_size > 0) { while (blob_size > 0) {
unsigned int payload_size = Min(blob_size, max_payload_size); unsigned int payload_size = Min(blob_size, max_payload_size);
blob_size -= payload_size; blob_size -= payload_size;
internal_memcpy(block_data_begin, blob_pos, payload_size); internal_memcpy(block_data_begin, blob_pos, payload_size);
blob_pos += payload_size; blob_pos += payload_size;
((CovHeader *)block.data())->data_length = payload_size; ((CovHeader *)block.data())->data_length = payload_size;
internal_write(cov_fd, block.data(), WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size);
header_size_with_module + payload_size);
} }
} }
} }
@ -292,29 +557,77 @@ static void CovWritePacked(int pid, const char *module, const void *blob,
// If packed = true and name == 0: <pid>.<sancov>.<packed>. // If packed = true and name == 0: <pid>.<sancov>.<packed>.
// If packed = true and name != 0: <name>.<sancov>.<packed> (name is // If packed = true and name != 0: <name>.<sancov>.<packed> (name is
// user-supplied). // user-supplied).
static int CovOpenFile(bool packed, const char* name) { static fd_t CovOpenFile(InternalScopedString *path, bool packed,
InternalScopedBuffer<char> path(1024); const char *name, const char *extension = "sancov") {
path->clear();
if (!packed) { if (!packed) {
CHECK(name); CHECK(name);
internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov", path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
common_flags()->coverage_dir, name, internal_getpid()); extension);
} else { } else {
if (!name) if (!name)
internal_snprintf((char *)path.data(), path.size(), path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
"%s/%zd.sancov.packed", common_flags()->coverage_dir, extension);
internal_getpid());
else else
internal_snprintf((char *)path.data(), path.size(), "%s/%s.sancov.packed", path->append("%s/%s.%s.packed", coverage_dir, name, extension);
common_flags()->coverage_dir, name);
}
uptr fd = OpenFile(path.data(), true);
if (internal_iserror(fd)) {
Report(" SanitizerCoverage: failed to open %s for writing\n", path.data());
return -1;
} }
error_t err;
fd_t fd = OpenFile(path->data(), WrOnly, &err);
if (fd == kInvalidFd)
Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
path->data(), err);
return fd; return fd;
} }
// Dump trace PCs and trace events into two separate files.
void CoverageData::DumpTrace() {
uptr max_idx = tr_event_pointer - tr_event_array;
if (!max_idx) return;
auto sym = Symbolizer::GetOrInit();
if (!sym)
return;
InternalScopedString out(32 << 20);
for (uptr i = 0, n = size(); i < n; i++) {
const char *module_name = "<unknown>";
uptr module_address = 0;
sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
&module_address);
out.append("%s 0x%zx\n", module_name, module_address);
}
InternalScopedString path(kMaxPathLength);
fd_t fd = CovOpenFile(&path, false, "trace-points");
if (fd == kInvalidFd) return;
WriteToFile(fd, out.data(), out.length());
CloseFile(fd);
fd = CovOpenFile(&path, false, "trace-compunits");
if (fd == kInvalidFd) return;
out.clear();
for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
WriteToFile(fd, out.data(), out.length());
CloseFile(fd);
fd = CovOpenFile(&path, false, "trace-events");
if (fd == kInvalidFd) return;
uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
// The trace file could be huge, and may not be written with a single syscall.
while (bytes_to_write) {
uptr actually_written;
if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) &&
actually_written <= bytes_to_write) {
bytes_to_write -= actually_written;
event_bytes += actually_written;
} else {
break;
}
}
CloseFile(fd);
VReport(1, " CovDump: Trace: %zd PCs written\n", size());
VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}
// This function dumps the caller=>callee pairs into a file as a sequence of // This function dumps the caller=>callee pairs into a file as a sequence of
// lines like "module_name offset". // lines like "module_name offset".
void CoverageData::DumpCallerCalleePairs() { void CoverageData::DumpCallerCalleePairs() {
@ -347,88 +660,166 @@ void CoverageData::DumpCallerCalleePairs() {
callee_module_address); callee_module_address);
} }
} }
int fd = CovOpenFile(false, "caller-callee"); InternalScopedString path(kMaxPathLength);
if (fd < 0) return; fd_t fd = CovOpenFile(&path, false, "caller-callee");
internal_write(fd, out.data(), out.length()); if (fd == kInvalidFd) return;
internal_close(fd); WriteToFile(fd, out.data(), out.length());
CloseFile(fd);
VReport(1, " CovDump: %zd caller-callee pairs written\n", total); VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
} }
// Dump the coverage on disk. // Record the current PC into the event buffer.
static void CovDump() { // Every event is a u32 value (index in tr_pc_array_index) so we compute
if (!common_flags()->coverage || common_flags()->coverage_direct) return; // it once and then cache in the provided 'cache' storage.
#if !SANITIZER_WINDOWS //
if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed)) // This function will eventually be inlined by the compiler.
return; void CoverageData::TraceBasicBlock(s32 *id) {
uptr size = coverage_data.size(); // Will trap here if
InternalMmapVector<u32> offsets(size); // 1. coverage is not enabled at run-time.
uptr *vb = coverage_data.data(); // 2. The array tr_event_array is full.
uptr *ve = vb + size; *tr_event_pointer = static_cast<u32>(*id - 1);
SortArray(vb, size); tr_event_pointer++;
MemoryMappingLayout proc_maps(/*cache_enabled*/true); }
uptr mb, me, off, prot;
InternalScopedBuffer<char> module(4096); void CoverageData::DumpCounters() {
InternalScopedBuffer<char> path(4096 * 2); if (!common_flags()->coverage_counters) return;
for (int i = 0; uptr n = coverage_data.GetNumberOf8bitCounters();
proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot); if (!n) return;
i++) { InternalScopedBuffer<u8> bitset(n);
if ((prot & MemoryMappingLayout::kProtectionExecute) == 0) coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data());
continue; InternalScopedString path(kMaxPathLength);
while (vb < ve && *vb < mb) vb++;
if (vb >= ve) break; for (uptr m = 0; m < module_name_vec.size(); m++) {
if (*vb < me) { auto r = module_name_vec[m];
offsets.clear(); CHECK(r.copied_module_name);
const uptr *old_vb = vb; CHECK_LE(r.beg, r.end);
CHECK_LE(off, *vb); CHECK_LE(r.end, size());
for (; vb < ve && *vb < me; vb++) { const char *base_name = StripModuleName(r.copied_module_name);
uptr diff = *vb - (i ? mb : 0) + off; fd_t fd =
CHECK_LE(diff, 0xffffffffU); CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov");
offsets.push_back(static_cast<u32>(diff)); if (fd == kInvalidFd) return;
} WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg);
const char *module_name = StripModuleName(module.data()); CloseFile(fd);
if (cov_sandboxed) { VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg,
if (cov_fd >= 0) { base_name);
CovWritePacked(internal_getpid(), module_name, offsets.data(), }
offsets.size() * sizeof(u32)); }
VReport(1, " CovDump: %zd PCs written to packed file\n", vb - old_vb);
} void CoverageData::DumpAsBitSet() {
} else { if (!common_flags()->coverage_bitset) return;
// One file per module per process. if (!size()) return;
internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov", InternalScopedBuffer<char> out(size());
common_flags()->coverage_dir, module_name, InternalScopedString path(kMaxPathLength);
internal_getpid()); for (uptr m = 0; m < module_name_vec.size(); m++) {
int fd = CovOpenFile(false /* packed */, module_name); uptr n_set_bits = 0;
if (fd > 0) { auto r = module_name_vec[m];
internal_write(fd, offsets.data(), offsets.size() * sizeof(u32)); CHECK(r.copied_module_name);
internal_close(fd); CHECK_LE(r.beg, r.end);
VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), CHECK_LE(r.end, size());
vb - old_vb); for (uptr i = r.beg; i < r.end; i++) {
} uptr pc = UnbundlePc(pc_array[i]);
out[i] = pc ? '1' : '0';
if (pc)
n_set_bits++;
}
const char *base_name = StripModuleName(r.copied_module_name);
fd_t fd = CovOpenFile(&path, /* packed */false, base_name, "bitset-sancov");
if (fd == kInvalidFd) return;
WriteToFile(fd, out.data() + r.beg, r.end - r.beg);
CloseFile(fd);
VReport(1,
" CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
r.end - r.beg, base_name, n_set_bits);
}
}
void CoverageData::DumpOffsets() {
auto sym = Symbolizer::GetOrInit();
if (!common_flags()->coverage_pcs) return;
CHECK_NE(sym, nullptr);
InternalMmapVector<uptr> offsets(0);
InternalScopedString path(kMaxPathLength);
for (uptr m = 0; m < module_name_vec.size(); m++) {
offsets.clear();
uptr num_words_for_magic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
for (uptr i = 0; i < num_words_for_magic; i++)
offsets.push_back(0);
auto r = module_name_vec[m];
CHECK(r.copied_module_name);
CHECK_LE(r.beg, r.end);
CHECK_LE(r.end, size());
for (uptr i = r.beg; i < r.end; i++) {
uptr pc = UnbundlePc(pc_array[i]);
uptr counter = UnbundleCounter(pc_array[i]);
if (!pc) continue; // Not visited.
uptr offset = 0;
sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
offsets.push_back(BundlePcAndCounter(offset, counter));
}
CHECK_GE(offsets.size(), num_words_for_magic);
SortArray(offsets.data(), offsets.size());
for (uptr i = 0; i < offsets.size(); i++)
offsets[i] = UnbundlePc(offsets[i]);
uptr num_offsets = offsets.size() - num_words_for_magic;
u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
CHECK_EQ(*magic_p, 0ULL);
// FIXME: we may want to write 32-bit offsets even in 64-mode
// if all the offsets are small enough.
*magic_p = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;
const char *module_name = StripModuleName(r.copied_module_name);
if (cov_sandboxed) {
if (cov_fd != kInvalidFd) {
CovWritePacked(internal_getpid(), module_name, offsets.data(),
offsets.size() * sizeof(offsets[0]));
VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets);
} }
} else {
// One file per module per process.
fd_t fd = CovOpenFile(&path, false /* packed */, module_name);
if (fd == kInvalidFd) continue;
WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
CloseFile(fd);
VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
} }
} }
if (cov_fd >= 0) if (cov_fd != kInvalidFd)
internal_close(cov_fd); CloseFile(cov_fd);
coverage_data.DumpCallerCalleePairs(); }
#endif // !SANITIZER_WINDOWS
void CoverageData::DumpAll() {
if (!coverage_enabled || common_flags()->coverage_direct) return;
if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
return;
DumpAsBitSet();
DumpCounters();
DumpTrace();
DumpOffsets();
DumpCallerCalleePairs();
} }
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) { void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
if (!args) return; if (!args) return;
if (!common_flags()->coverage) return; if (!coverage_enabled) return;
cov_sandboxed = args->coverage_sandboxed; cov_sandboxed = args->coverage_sandboxed;
if (!cov_sandboxed) return; if (!cov_sandboxed) return;
cov_fd = args->coverage_fd;
cov_max_block_size = args->coverage_max_block_size; cov_max_block_size = args->coverage_max_block_size;
if (cov_fd < 0) if (args->coverage_fd >= 0) {
cov_fd = (fd_t)args->coverage_fd;
} else {
InternalScopedString path(kMaxPathLength);
// Pre-open the file now. The sandbox won't allow us to do it later. // Pre-open the file now. The sandbox won't allow us to do it later.
cov_fd = CovOpenFile(true /* packed */, 0); cov_fd = CovOpenFile(&path, true /* packed */, nullptr);
}
} }
int MaybeOpenCovFile(const char *name) { fd_t MaybeOpenCovFile(const char *name) {
CHECK(name); CHECK(name);
if (!common_flags()->coverage) return -1; if (!coverage_enabled) return kInvalidFd;
return CovOpenFile(true /* packed */, name); InternalScopedString path(kMaxPathLength);
return CovOpenFile(&path, true /* packed */, name);
} }
void CovBeforeFork() { void CovBeforeFork() {
@ -439,32 +830,114 @@ void CovAfterFork(int child_pid) {
coverage_data.AfterFork(child_pid); coverage_data.AfterFork(child_pid);
} }
} // namespace __sanitizer static void MaybeDumpCoverage() {
if (common_flags()->coverage)
__sanitizer_cov_dump();
}
void InitializeCoverage(bool enabled, const char *dir) {
if (coverage_enabled)
return; // May happen if two sanitizer enable coverage in the same process.
coverage_enabled = enabled;
coverage_dir = dir;
coverage_data.Init();
if (enabled) coverage_data.Enable();
if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
AddDieCallback(MaybeDumpCoverage);
}
void ReInitializeCoverage(bool enabled, const char *dir) {
coverage_enabled = enabled;
coverage_dir = dir;
coverage_data.ReInit();
}
void CoverageUpdateMapping() {
if (coverage_enabled)
CovUpdateMapping(coverage_dir);
}
} // namespace __sanitizer
extern "C" { extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC())); coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
if (static_cast<s32>(
__sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
__sanitizer_cov(guard);
} }
SANITIZER_INTERFACE_ATTRIBUTE void SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) { __sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()), coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
callee, callee_cache16, 16); callee, callee_cache16, 16);
} }
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
coverage_enabled = true;
coverage_dir = common_flags()->coverage_dir;
coverage_data.Init(); coverage_data.Init();
} }
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_module_init(uptr npcs) { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
if (!common_flags()->coverage || !common_flags()->coverage_direct) return; coverage_data.DumpAll();
if (SANITIZER_ANDROID) { }
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
const char *comp_unit_name) {
coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
coverage_data.InitializeCounters(counters, npcs);
if (!common_flags()->coverage_direct) return;
if (SANITIZER_ANDROID && coverage_enabled) {
// dlopen/dlclose interceptors do not work on Android, so we rely on // dlopen/dlclose interceptors do not work on Android, so we rely on
// Extend() calls to update .sancov.map. // Extend() calls to update .sancov.map.
CovUpdateMapping(GET_CALLER_PC()); CovUpdateMapping(coverage_dir, GET_CALLER_PC());
} }
coverage_data.Extend(npcs); coverage_data.Extend(npcs);
} }
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
sptr __sanitizer_maybe_open_cov_file(const char *name) { sptr __sanitizer_maybe_open_cov_file(const char *name) {
return MaybeOpenCovFile(name); return (sptr)MaybeOpenCovFile(name);
} }
} // extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_coverage() {
return atomic_load(&coverage_counter, memory_order_relaxed);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_func_enter(s32 *id) {
coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_basic_block(s32 *id) {
coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_reset_coverage() {
coverage_data.ReinitializeGuards();
internal_bzero_aligned16(
coverage_data.data(),
RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_guards(uptr **data) {
*data = coverage_data.data();
return coverage_data.size();
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
return coverage_data.GetNumberOf8bitCounters();
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
// Default empty implementations (weak). Users should redefine them.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch() {}
} // extern "C"

View File

@ -33,7 +33,6 @@
namespace __sanitizer { namespace __sanitizer {
static const uptr kMaxNumberOfModules = 1 << 14;
static const uptr kMaxTextSize = 64 * 1024; static const uptr kMaxTextSize = 64 * 1024;
struct CachedMapping { struct CachedMapping {
@ -60,8 +59,8 @@ struct CachedMapping {
static CachedMapping cached_mapping; static CachedMapping cached_mapping;
static StaticSpinMutex mapping_mu; static StaticSpinMutex mapping_mu;
void CovUpdateMapping(uptr caller_pc) { void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
if (!common_flags()->coverage || !common_flags()->coverage_direct) return; if (!common_flags()->coverage_direct) return;
SpinMutexLock l(&mapping_mu); SpinMutexLock l(&mapping_mu);
@ -69,57 +68,58 @@ void CovUpdateMapping(uptr caller_pc) {
return; return;
InternalScopedString text(kMaxTextSize); InternalScopedString text(kMaxTextSize);
InternalScopedBuffer<char> modules_data(kMaxNumberOfModules *
sizeof(LoadedModule));
LoadedModule *modules = (LoadedModule *)modules_data.data();
CHECK(modules);
int n_modules = GetListOfModules(modules, kMaxNumberOfModules,
/* filter */ 0);
text.append("%d\n", sizeof(uptr) * 8); {
for (int i = 0; i < n_modules; ++i) { InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
const char *module_name = StripModuleName(modules[i].full_name()); CHECK(modules.data());
for (unsigned j = 0; j < modules[i].n_ranges(); ++j) { int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules,
if (modules[i].address_range_executable(j)) { /* filter */ nullptr);
uptr start = modules[i].address_range_start(j);
uptr end = modules[i].address_range_end(j); text.append("%d\n", sizeof(uptr) * 8);
uptr base = modules[i].base_address(); for (int i = 0; i < n_modules; ++i) {
text.append("%zx %zx %zx %s\n", start, end, base, module_name); const char *module_name = StripModuleName(modules[i].full_name());
if (caller_pc && caller_pc >= start && caller_pc < end) uptr base = modules[i].base_address();
cached_mapping.SetModuleRange(start, end); for (auto iter = modules[i].ranges(); iter.hasNext();) {
const auto *range = iter.next();
if (range->executable) {
uptr start = range->beg;
uptr end = range->end;
text.append("%zx %zx %zx %s\n", start, end, base, module_name);
if (caller_pc && caller_pc >= start && caller_pc < end)
cached_mapping.SetModuleRange(start, end);
}
} }
modules[i].clear();
} }
} }
int err; error_t err;
InternalScopedString tmp_path(64 + InternalScopedString tmp_path(64 + internal_strlen(coverage_dir));
internal_strlen(common_flags()->coverage_dir));
uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(), uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
"%s/%zd.sancov.map.tmp", common_flags()->coverage_dir, "%s/%zd.sancov.map.tmp", coverage_dir,
internal_getpid()); internal_getpid());
CHECK_LE(res, tmp_path.size()); CHECK_LE(res, tmp_path.size());
uptr map_fd = OpenFile(tmp_path.data(), true); fd_t map_fd = OpenFile(tmp_path.data(), WrOnly, &err);
if (internal_iserror(map_fd)) { if (map_fd == kInvalidFd) {
Report(" Coverage: failed to open %s for writing\n", tmp_path.data()); Report("Coverage: failed to open %s for writing: %d\n", tmp_path.data(),
err);
Die(); Die();
} }
res = internal_write(map_fd, text.data(), text.length()); if (!WriteToFile(map_fd, text.data(), text.length(), nullptr, &err)) {
if (internal_iserror(res, &err)) {
Printf("sancov.map write failed: %d\n", err); Printf("sancov.map write failed: %d\n", err);
Die(); Die();
} }
internal_close(map_fd); CloseFile(map_fd);
InternalScopedString path(64 + internal_strlen(common_flags()->coverage_dir)); InternalScopedString path(64 + internal_strlen(coverage_dir));
res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map", res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
common_flags()->coverage_dir, internal_getpid()); coverage_dir, internal_getpid());
CHECK_LE(res, path.size()); CHECK_LE(res, path.size());
res = internal_rename(tmp_path.data(), path.data()); if (!RenameFile(tmp_path.data(), path.data(), &err)) {
if (internal_iserror(res, &err)) {
Printf("sancov.map rename failed: %d\n", err); Printf("sancov.map rename failed: %d\n", err);
Die(); Die();
} }
} }
} // namespace __sanitizer } // namespace __sanitizer

View File

@ -38,19 +38,20 @@ struct DD : public DDetector {
explicit DD(const DDFlags *flags); explicit DD(const DDFlags *flags);
DDPhysicalThread* CreatePhysicalThread(); DDPhysicalThread *CreatePhysicalThread() override;
void DestroyPhysicalThread(DDPhysicalThread *pt); void DestroyPhysicalThread(DDPhysicalThread *pt) override;
DDLogicalThread* CreateLogicalThread(u64 ctx); DDLogicalThread *CreateLogicalThread(u64 ctx) override;
void DestroyLogicalThread(DDLogicalThread *lt); void DestroyLogicalThread(DDLogicalThread *lt) override;
void MutexInit(DDCallback *cb, DDMutex *m); void MutexInit(DDCallback *cb, DDMutex *m) override;
void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock); void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock); void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock); bool trylock) override;
void MutexDestroy(DDCallback *cb, DDMutex *m); void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
void MutexDestroy(DDCallback *cb, DDMutex *m) override;
DDReport *GetReport(DDCallback *cb); DDReport *GetReport(DDCallback *cb) override;
void MutexEnsureID(DDLogicalThread *lt, DDMutex *m); void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
void ReportDeadlock(DDCallback *cb, DDMutex *m); void ReportDeadlock(DDCallback *cb, DDMutex *m);
@ -68,7 +69,7 @@ DD::DD(const DDFlags *flags)
} }
DDPhysicalThread* DD::CreatePhysicalThread() { DDPhysicalThread* DD::CreatePhysicalThread() {
return 0; return nullptr;
} }
void DD::DestroyPhysicalThread(DDPhysicalThread *pt) { void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
@ -178,10 +179,10 @@ void DD::MutexDestroy(DDCallback *cb,
DDReport *DD::GetReport(DDCallback *cb) { DDReport *DD::GetReport(DDCallback *cb) {
if (!cb->lt->report_pending) if (!cb->lt->report_pending)
return 0; return nullptr;
cb->lt->report_pending = false; cb->lt->report_pending = false;
return &cb->lt->rep; return &cb->lt->rep;
} }
} // namespace __sanitizer } // namespace __sanitizer
#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1 #endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

View File

@ -70,10 +70,10 @@ struct DDCallback {
struct DDetector { struct DDetector {
static DDetector *Create(const DDFlags *flags); static DDetector *Create(const DDFlags *flags);
virtual DDPhysicalThread* CreatePhysicalThread() { return 0; } virtual DDPhysicalThread* CreatePhysicalThread() { return nullptr; }
virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {} virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}
virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return 0; } virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return nullptr; }
virtual void DestroyLogicalThread(DDLogicalThread *lt) {} virtual void DestroyLogicalThread(DDLogicalThread *lt) {}
virtual void MutexInit(DDCallback *cb, DDMutex *m) {} virtual void MutexInit(DDCallback *cb, DDMutex *m) {}
@ -83,7 +83,7 @@ struct DDetector {
virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {} virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}
virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {} virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}
virtual DDReport *GetReport(DDCallback *cb) { return 0; } virtual DDReport *GetReport(DDCallback *cb) { return nullptr; }
}; };
} // namespace __sanitizer } // namespace __sanitizer

View File

@ -0,0 +1,169 @@
//===-- sanitizer_flag_parser.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_flag_parser.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_flags.h"
#include "sanitizer_flag_parser.h"
namespace __sanitizer {
LowLevelAllocator FlagParser::Alloc;
// Accumulates names of flags the parser did not recognize, so they can be
// reported in one batch later via ReportUnrecognizedFlags(). Unrecognized
// flags are deliberately non-fatal: several sanitizer runtimes may share one
// options string, and a flag unknown to this one may belong to another.
class UnknownFlags {
// Fixed capacity of the name table; Add() CHECK-fails beyond this.
static const int kMaxUnknownFlags = 20;
const char *unknown_flags_[kMaxUnknownFlags];
int n_unknown_flags_;
public:
// Record one unrecognized flag name (pointer is stored, not copied).
void Add(const char *name) {
CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);
unknown_flags_[n_unknown_flags_++] = name;
}
// Print all collected names as a single warning and reset the list.
void Report() {
if (!n_unknown_flags_) return;
Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_);
for (int i = 0; i < n_unknown_flags_; ++i)
Printf(" %s\n", unknown_flags_[i]);
n_unknown_flags_ = 0;
}
};
UnknownFlags unknown_flags;
// Flush the warning about any flags that no registered handler recognized.
// Intended to be called once, after all flag sources have been parsed.
void ReportUnrecognizedFlags() {
unknown_flags.Report();
}
// strndup() replacement backed by the parser's LowLevelAllocator, so it is
// usable during early startup before malloc/interceptors are ready.
// Copies at most |n| bytes of |s| and NUL-terminates. The result is never
// freed (flag names/values live for the lifetime of the process).
char *FlagParser::ll_strndup(const char *s, uptr n) {
uptr len = internal_strnlen(s, n);
char *s2 = (char*)Alloc.Allocate(len + 1);
internal_memcpy(s2, s, len);
s2[len] = 0;
return s2;
}
// Print every registered flag with its description (backs the "help" flag).
void FlagParser::PrintFlagDescriptions() {
Printf("Available flags for %s:\n", SanitizerToolName);
for (int i = 0; i < n_flags_; ++i)
Printf("\t%s\n\t\t- %s\n", flags_[i].name, flags_[i].desc);
}
// Report a flag-parsing error and terminate the process; parsing errors in
// sanitizer options are unrecoverable by design.
void FlagParser::fatal_error(const char *err) {
Printf("ERROR: %s\n", err);
Die();
}
// Characters that separate "name=value" pairs. ',' and ':' support the
// conventional single-line option strings (e.g. "a=1:b=2"); whitespace and
// newlines support options read from files.
bool FlagParser::is_space(char c) {
return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
c == '\r';
}
// Advance pos_ past any run of separator characters (see is_space()).
void FlagParser::skip_whitespace() {
while (is_space(buf_[pos_])) ++pos_;
}
// Parse one "name=value" pair starting at buf_[pos_]. The value may be
// wrapped in single or double quotes, in which case separator characters
// inside it are preserved and the quotes are stripped. Any malformed input
// terminates the process via fatal_error().
void FlagParser::parse_flag() {
uptr name_start = pos_;
while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;
if (buf_[pos_] != '=') fatal_error("expected '='");
char *name = ll_strndup(buf_ + name_start, pos_ - name_start);
uptr value_start = ++pos_;
char *value;
if (buf_[pos_] == '\'' || buf_[pos_] == '"') {
// Quoted value: scan to the matching quote; copy excludes both quotes.
char quote = buf_[pos_++];
while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;
if (buf_[pos_] == 0) fatal_error("unterminated string");
value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);
++pos_; // consume the closing quote
} else {
// Unquoted value: scan to the next separator or end of string.
while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;
// NOTE(review): this condition is always false immediately after the
// loop above terminates; kept byte-identical to upstream r250806.
if (buf_[pos_] != 0 && !is_space(buf_[pos_]))
fatal_error("expected separator or eol");
value = ll_strndup(buf_ + value_start, pos_ - value_start);
}
// Unknown names are tolerated (run_handler() returns true for them); only
// a handler that rejects the value makes parsing fail.
bool res = run_handler(name, value);
if (!res) fatal_error("Flag parsing failed.");
}
// Parse all "name=value" pairs in buf_ until the terminating NUL.
void FlagParser::parse_flags() {
while (true) {
skip_whitespace();
if (buf_[pos_] == 0) break;
parse_flag();
}
// Do a sanity check for certain flags.
// Clamp directly on the global CommonFlags instance: malloc_context_size
// below 1 would disable allocation stack traces entirely.
if (common_flags_dont_use.malloc_context_size < 1)
common_flags_dont_use.malloc_context_size = 1;
}
// Parse flags from the NUL-terminated string |s|. A null |s| is a no-op.
// Saves and restores (buf_, pos_) so that a handler triggered mid-parse
// (e.g. the "include" handler) can reenter ParseString()/ParseFile().
void FlagParser::ParseString(const char *s) {
if (!s) return;
// Backup current parser state to allow nested ParseString() calls.
const char *old_buf_ = buf_;
uptr old_pos_ = pos_;
buf_ = s;
pos_ = 0;
parse_flags();
buf_ = old_buf_;
pos_ = old_pos_;
}
// Read the file at |path| (up to 32KB, or one page if larger) and parse its
// contents as flags. Returns false on read failure unless |ignore_missing|
// is set, in which case a missing/unreadable file is silently accepted.
bool FlagParser::ParseFile(const char *path, bool ignore_missing) {
static const uptr kMaxIncludeSize = 1 << 15;
char *data;
uptr data_mapped_size;
error_t err;
uptr len;
if (!ReadFileToBuffer(path, &data, &data_mapped_size, &len,
Max(kMaxIncludeSize, GetPageSizeCached()), &err)) {
if (ignore_missing)
return true;
Printf("Failed to read options from '%s': error %d\n", path, err);
return false;
}
// ParseString() copies what it needs via ll_strndup(), so the mapped
// buffer can be released immediately afterwards.
ParseString(data);
UnmapOrDie(data, data_mapped_size);
return true;
}
// Dispatch |value| to the handler registered under |name|. Returns the
// handler's verdict; an unknown |name| is recorded for a later warning and
// reported as success (linear scan is fine for the small flag count).
bool FlagParser::run_handler(const char *name, const char *value) {
for (int i = 0; i < n_flags_; ++i) {
if (internal_strcmp(name, flags_[i].name) == 0)
return flags_[i].handler->Parse(value);
}
// Unrecognized flag. This is not a fatal error, we may print a warning later.
unknown_flags.Add(name);
return true;
}
// Register |handler| for flag |name| with help text |desc|. All three
// pointers are stored as-is (not copied) and must outlive the parser.
// CHECK-fails once the fixed kMaxFlags table is full.
void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
const char *desc) {
CHECK_LT(n_flags_, kMaxFlags);
flags_[n_flags_].name = name;
flags_[n_flags_].desc = desc;
flags_[n_flags_].handler = handler;
++n_flags_;
}
// Allocate the flag table eagerly from the low-level allocator; the table
// (kMaxFlags entries) is never resized or freed.
FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
}
} // namespace __sanitizer

View File

@ -0,0 +1,120 @@
//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_FLAG_REGISTRY_H
#define SANITIZER_FLAG_REGISTRY_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_common.h"
namespace __sanitizer {
// Type-erased interface for parsing one flag's value. The default Parse()
// rejects every value; concrete handlers override it.
class FlagHandlerBase {
public:
virtual bool Parse(const char *value) { return false; }
};
// Handler that parses a value into the pointed-to variable of type T.
// Parse() is specialized per supported type (bool, const char *, int, uptr).
template <typename T>
class FlagHandler : public FlagHandlerBase {
T *t_;  // destination variable; not owned, must outlive the handler
public:
explicit FlagHandler(T *t) : t_(t) {}
bool Parse(const char *value) final;
};
// Accepts exactly "0"/"no"/"false" and "1"/"yes"/"true" (case-sensitive);
// anything else is reported and rejected.
template <>
inline bool FlagHandler<bool>::Parse(const char *value) {
if (internal_strcmp(value, "0") == 0 ||
internal_strcmp(value, "no") == 0 ||
internal_strcmp(value, "false") == 0) {
*t_ = false;
return true;
}
if (internal_strcmp(value, "1") == 0 ||
internal_strcmp(value, "yes") == 0 ||
internal_strcmp(value, "true") == 0) {
*t_ = true;
return true;
}
Printf("ERROR: Invalid value for bool option: '%s'\n", value);
return false;
}
// Stores a duplicated copy of the value string; never fails. The previous
// value (if any) is intentionally leaked -- flag storage is process-lifetime.
template <>
inline bool FlagHandler<const char *>::Parse(const char *value) {
*t_ = internal_strdup(value);
return true;
}
// Parses a base-10 integer; rejects the value if any trailing characters
// remain after the number.
template <>
inline bool FlagHandler<int>::Parse(const char *value) {
char *value_end;
*t_ = internal_simple_strtoll(value, &value_end, 10);
bool ok = *value_end == 0;
if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value);
return ok;
}
// Parses a base-10 integer into a uptr flag; rejects trailing characters.
// NOTE(review): uses the *signed* strtoll, so values above INT64_MAX do not
// round-trip -- presumably acceptable for the flags involved; verify if a
// flag ever needs the full unsigned range.
template <>
inline bool FlagHandler<uptr>::Parse(const char *value) {
char *value_end;
*t_ = internal_simple_strtoll(value, &value_end, 10);
bool ok = *value_end == 0;
if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value);
return ok;
}
// Parses "name=value" option strings/files and dispatches each value to the
// FlagHandlerBase registered for that name. Allocation-free with respect to
// malloc: all storage comes from the static LowLevelAllocator, so the parser
// is safe to use during early sanitizer startup.
class FlagParser {
static const int kMaxFlags = 200;  // fixed capacity of the handler table
struct Flag {
const char *name;
const char *desc;
FlagHandlerBase *handler;
} *flags_;
int n_flags_;

// Current parse state; saved/restored by ParseString() to allow nesting.
const char *buf_;
uptr pos_;

public:
FlagParser();
void RegisterHandler(const char *name, FlagHandlerBase *handler,
const char *desc);
void ParseString(const char *s);
bool ParseFile(const char *path, bool ignore_missing);
void PrintFlagDescriptions();

// Shared low-level allocator used for the flag table, handlers, and
// duplicated name/value strings; nothing allocated from it is ever freed.
static LowLevelAllocator Alloc;

private:
void fatal_error(const char *err);
bool is_space(char c);
void skip_whitespace();
void parse_flags();
void parse_flag();
bool run_handler(const char *name, const char *value);
char *ll_strndup(const char *s, uptr n);
};
// Convenience helper: allocate a FlagHandler<T> bound to |var| from the
// parser's low-level allocator and register it under |name|.
template <typename T>
static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
T *var) {
FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var); // NOLINT
parser->RegisterHandler(name, fh, desc);
}
void ReportUnrecognizedFlags();
} // namespace __sanitizer
#endif // SANITIZER_FLAG_REGISTRY_H

View File

@ -14,6 +14,7 @@
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_libc.h" #include "sanitizer_libc.h"
#include "sanitizer_list.h" #include "sanitizer_list.h"
#include "sanitizer_flag_parser.h"
namespace __sanitizer { namespace __sanitizer {
@ -32,274 +33,68 @@ IntrusiveList<FlagDescription> flag_descriptions;
# define SANITIZER_NEEDS_SEGV 1 # define SANITIZER_NEEDS_SEGV 1
#endif #endif
void SetCommonFlagsDefaults(CommonFlags *f) { void CommonFlags::SetDefaults() {
f->symbolize = true; #define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
f->external_symbolizer_path = 0; #include "sanitizer_flags.inc"
f->allow_addr2line = false; #undef COMMON_FLAG
f->strip_path_prefix = "";
f->fast_unwind_on_check = false;
f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true;
f->handle_ioctl = false;
f->malloc_context_size = 1;
f->log_path = "stderr";
f->verbosity = 0;
f->detect_leaks = true;
f->leak_check_at_exit = true;
f->allocator_may_return_null = false;
f->print_summary = true;
f->check_printf = true;
// TODO(glider): tools may want to set different defaults for handle_segv.
f->handle_segv = SANITIZER_NEEDS_SEGV;
f->allow_user_segv_handler = false;
f->use_sigaltstack = true;
f->detect_deadlocks = false;
f->clear_shadow_mmap_threshold = 64 * 1024;
f->color = "auto";
f->legacy_pthread_cond = false;
f->intercept_tls_get_addr = false;
f->coverage = false;
f->coverage_direct = SANITIZER_ANDROID;
f->coverage_dir = ".";
f->full_address_space = false;
f->suppressions = "";
f->print_suppressions = true;
f->disable_coredump = (SANITIZER_WORDSIZE == 64);
f->symbolize_inline_frames = true;
f->stack_trace_format = "DEFAULT";
} }
void ParseCommonFlagsFromString(CommonFlags *f, const char *str) { void CommonFlags::CopyFrom(const CommonFlags &other) {
ParseFlag(str, &f->symbolize, "symbolize", internal_memcpy(this, &other, sizeof(*this));
"If set, use the online symbolizer from common sanitizer runtime to turn "
"virtual addresses to file/line locations.");
ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path",
"Path to external symbolizer. If empty, the tool will search $PATH for "
"the symbolizer.");
ParseFlag(str, &f->allow_addr2line, "allow_addr2line",
"If set, allows online symbolizer to run addr2line binary to symbolize "
"stack traces (addr2line will only be used if llvm-symbolizer binary is "
"unavailable.");
ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix",
"Strips this prefix from file paths in error reports.");
ParseFlag(str, &f->fast_unwind_on_check, "fast_unwind_on_check",
"If available, use the fast frame-pointer-based unwinder on "
"internal CHECK failures.");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal",
"If available, use the fast frame-pointer-based unwinder on fatal "
"errors.");
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc",
"If available, use the fast frame-pointer-based unwinder on "
"malloc/free.");
ParseFlag(str, &f->handle_ioctl, "handle_ioctl",
"Intercept and handle ioctl requests.");
ParseFlag(str, &f->malloc_context_size, "malloc_context_size",
"Max number of stack frames kept for each allocation/deallocation.");
ParseFlag(str, &f->log_path, "log_path",
"Write logs to \"log_path.pid\". The special values are \"stdout\" and "
"\"stderr\". The default is \"stderr\".");
ParseFlag(str, &f->verbosity, "verbosity",
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).");
ParseFlag(str, &f->detect_leaks, "detect_leaks",
"Enable memory leak detection.");
ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit",
"Invoke leak checking in an atexit handler. Has no effect if "
"detect_leaks=false, or if __lsan_do_leak_check() is called before the "
"handler has a chance to run.");
ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null",
"If false, the allocator will crash instead of returning 0 on "
"out-of-memory.");
ParseFlag(str, &f->print_summary, "print_summary",
"If false, disable printing error summaries in addition to error "
"reports.");
ParseFlag(str, &f->check_printf, "check_printf",
"Check printf arguments.");
ParseFlag(str, &f->handle_segv, "handle_segv",
"If set, registers the tool's custom SEGV handler (both SIGBUS and "
"SIGSEGV on OSX).");
ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler",
"If set, allows user to register a SEGV handler even if the tool "
"registers one.");
ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack",
"If set, uses alternate stack for signal handling.");
ParseFlag(str, &f->detect_deadlocks, "detect_deadlocks",
"If set, deadlock detection is enabled.");
ParseFlag(str, &f->clear_shadow_mmap_threshold,
"clear_shadow_mmap_threshold",
"Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
"memset(). This is the threshold size in bytes.");
ParseFlag(str, &f->color, "color",
"Colorize reports: (always|never|auto).");
ParseFlag(str, &f->legacy_pthread_cond, "legacy_pthread_cond",
"Enables support for dynamic libraries linked with libpthread 2.2.5.");
ParseFlag(str, &f->intercept_tls_get_addr, "intercept_tls_get_addr",
"Intercept __tls_get_addr.");
ParseFlag(str, &f->help, "help", "Print the flag descriptions.");
ParseFlag(str, &f->mmap_limit_mb, "mmap_limit_mb",
"Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
"not a user-facing flag, used mosly for testing the tools");
ParseFlag(str, &f->coverage, "coverage",
"If set, coverage information will be dumped at program shutdown (if the "
"coverage instrumentation was enabled at compile time).");
ParseFlag(str, &f->coverage_direct, "coverage_direct",
"If set, coverage information will be dumped directly to a memory "
"mapped file. This way data is not lost even if the process is "
"suddenly killed.");
ParseFlag(str, &f->coverage_dir, "coverage_dir",
"Target directory for coverage dumps. Defaults to the current "
"directory.");
ParseFlag(str, &f->full_address_space, "full_address_space",
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized");
ParseFlag(str, &f->suppressions, "suppressions", "Suppressions file name.");
ParseFlag(str, &f->print_suppressions, "print_suppressions",
"Print matched suppressions at exit.");
ParseFlag(str, &f->disable_coredump, "disable_coredump",
"Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
"dumping a 16T+ core file. Ignored on OSes that don't dump core by"
"default and for sanitizers that don't reserve lots of virtual memory.");
ParseFlag(str, &f->symbolize_inline_frames, "symbolize_inline_frames",
"Print inlined frames in stacktraces. Defaults to true.");
ParseFlag(str, &f->stack_trace_format, "stack_trace_format",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
"Use DEFAULT to get default format.");
// Do a sanity check for certain flags.
if (f->malloc_context_size < 1)
f->malloc_context_size = 1;
} }
static bool GetFlagValue(const char *env, const char *name, // Copy the string from "s" to "out", replacing "%b" with the binary basename.
const char **value, int *value_length) { static void SubstituteBinaryName(const char *s, char *out, uptr out_size) {
if (env == 0) char *out_end = out + out_size;
return false; while (*s && out < out_end - 1) {
const char *pos = 0; if (s[0] != '%' || s[1] != 'b') { *out++ = *s++; continue; }
for (;;) { const char *base = GetProcessName();
pos = internal_strstr(env, name); CHECK(base);
if (pos == 0) while (*base && out < out_end - 1)
return false; *out++ = *base++;
const char *name_end = pos + internal_strlen(name); s += 2; // skip "%b"
if ((pos != env && }
((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) || *out = '\0';
*name_end != '=') { }
// Seems to be middle of another flag name or value.
env = pos + 1; class FlagHandlerInclude : public FlagHandlerBase {
continue; FlagParser *parser_;
bool ignore_missing_;
public:
explicit FlagHandlerInclude(FlagParser *parser, bool ignore_missing)
: parser_(parser), ignore_missing_(ignore_missing) {}
bool Parse(const char *value) final {
if (internal_strchr(value, '%')) {
char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude");
SubstituteBinaryName(value, buf, kMaxPathLength);
bool res = parser_->ParseFile(buf, ignore_missing_);
UnmapOrDie(buf, kMaxPathLength);
return res;
} }
pos = name_end; return parser_->ParseFile(value, ignore_missing_);
break;
} }
const char *end; };
if (pos[0] != '=') {
end = pos; void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
} else { FlagHandlerInclude *fh_include = new (FlagParser::Alloc) // NOLINT
pos += 1; FlagHandlerInclude(parser, /*ignore_missing*/ false);
if (pos[0] == '"') { parser->RegisterHandler("include", fh_include,
pos += 1; "read more options from the given file");
end = internal_strchr(pos, '"'); FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc) // NOLINT
} else if (pos[0] == '\'') { FlagHandlerInclude(parser, /*ignore_missing*/ true);
pos += 1; parser->RegisterHandler(
end = internal_strchr(pos, '\''); "include_if_exists", fh_include_if_exists,
} else { "read more options from the given file (if it exists)");
// Read until the next space or colon.
end = pos + internal_strcspn(pos, " :");
}
if (end == 0)
end = pos + internal_strlen(pos);
}
*value = pos;
*value_length = end - pos;
return true;
} }
static bool StartsWith(const char *flag, int flag_length, const char *value) { void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
if (!flag || !value) #define COMMON_FLAG(Type, Name, DefaultValue, Description) \
return false; RegisterFlag(parser, #Name, Description, &cf->Name);
int value_length = internal_strlen(value); #include "sanitizer_flags.inc"
return (flag_length >= value_length) && #undef COMMON_FLAG
(0 == internal_strncmp(flag, value, value_length));
}
static LowLevelAllocator allocator_for_flags; RegisterIncludeFlags(parser, cf);
// The linear scan is suboptimal, but the number of flags is relatively small.
bool FlagInDescriptionList(const char *name) {
IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
while (it.hasNext()) {
if (!internal_strcmp(it.next()->name, name)) return true;
}
return false;
}
void AddFlagDescription(const char *name, const char *description) {
if (FlagInDescriptionList(name)) return;
FlagDescription *new_description = new(allocator_for_flags) FlagDescription;
new_description->name = name;
new_description->description = description;
flag_descriptions.push_back(new_description);
}
// TODO(glider): put the descriptions inside CommonFlags.
void PrintFlagDescriptions() {
IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
Printf("Available flags for %s:\n", SanitizerToolName);
while (it.hasNext()) {
FlagDescription *descr = it.next();
Printf("\t%s\n\t\t- %s\n", descr->name, descr->description);
}
}
void ParseFlag(const char *env, bool *flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
if (StartsWith(value, value_length, "0") ||
StartsWith(value, value_length, "no") ||
StartsWith(value, value_length, "false"))
*flag = false;
if (StartsWith(value, value_length, "1") ||
StartsWith(value, value_length, "yes") ||
StartsWith(value, value_length, "true"))
*flag = true;
}
void ParseFlag(const char *env, int *flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
*flag = static_cast<int>(internal_atoll(value));
}
void ParseFlag(const char *env, uptr *flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
*flag = static_cast<uptr>(internal_atoll(value));
}
void ParseFlag(const char *env, const char **flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
// Copy the flag value. Don't use locks here, as flags are parsed at
// tool startup.
char *value_copy = (char*)(allocator_for_flags.Allocate(value_length + 1));
internal_memcpy(value_copy, value, value_length);
value_copy[value_length] = '\0';
*flag = value_copy;
} }
} // namespace __sanitizer } // namespace __sanitizer

View File

@ -16,62 +16,38 @@
namespace __sanitizer { namespace __sanitizer {
void ParseFlag(const char *env, bool *flag,
const char *name, const char *descr);
void ParseFlag(const char *env, int *flag,
const char *name, const char *descr);
void ParseFlag(const char *env, uptr *flag,
const char *name, const char *descr);
void ParseFlag(const char *env, const char **flag,
const char *name, const char *descr);
struct CommonFlags { struct CommonFlags {
bool symbolize; #define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
const char *external_symbolizer_path; #include "sanitizer_flags.inc"
bool allow_addr2line; #undef COMMON_FLAG
const char *strip_path_prefix;
bool fast_unwind_on_check; void SetDefaults();
bool fast_unwind_on_fatal; void CopyFrom(const CommonFlags &other);
bool fast_unwind_on_malloc;
bool handle_ioctl;
int malloc_context_size;
const char *log_path;
int verbosity;
bool detect_leaks;
bool leak_check_at_exit;
bool allocator_may_return_null;
bool print_summary;
bool check_printf;
bool handle_segv;
bool allow_user_segv_handler;
bool use_sigaltstack;
bool detect_deadlocks;
uptr clear_shadow_mmap_threshold;
const char *color;
bool legacy_pthread_cond;
bool intercept_tls_get_addr;
bool help;
uptr mmap_limit_mb;
bool coverage;
bool coverage_direct;
const char *coverage_dir;
bool full_address_space;
const char *suppressions;
bool print_suppressions;
bool disable_coredump;
bool symbolize_inline_frames;
const char *stack_trace_format;
}; };
inline CommonFlags *common_flags() { // Functions to get/set global CommonFlags shared by all sanitizer runtimes:
extern CommonFlags common_flags_dont_use; extern CommonFlags common_flags_dont_use;
inline const CommonFlags *common_flags() {
return &common_flags_dont_use; return &common_flags_dont_use;
} }
void SetCommonFlagsDefaults(CommonFlags *f); inline void SetCommonFlagsDefaults() {
void ParseCommonFlagsFromString(CommonFlags *f, const char *str); common_flags_dont_use.SetDefaults();
void PrintFlagDescriptions(); }
// This function can only be used to setup tool-specific overrides for
// CommonFlags defaults. Generally, it should only be used right after
// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and
// only during the flags initialization (i.e. before they are used for
// the first time).
inline void OverrideCommonFlags(const CommonFlags &cf) {
common_flags_dont_use.CopyFrom(cf);
}
class FlagParser;
void RegisterCommonFlags(FlagParser *parser,
CommonFlags *cf = &common_flags_dont_use);
void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);
} // namespace __sanitizer } // namespace __sanitizer
#endif // SANITIZER_FLAGS_H #endif // SANITIZER_FLAGS_H

View File

@ -0,0 +1,192 @@
//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes common flags available in all sanitizers.
//
//===----------------------------------------------------------------------===//
#ifndef COMMON_FLAG
#error "Define COMMON_FLAG prior to including this file!"
#endif
// COMMON_FLAG(Type, Name, DefaultValue, Description)
// Supported types: bool, const char *, int, uptr.
// Default value must be a compile-time constant.
// Description must be a string literal.
COMMON_FLAG(
bool, symbolize, true,
"If set, use the online symbolizer from common sanitizer runtime to turn "
"virtual addresses to file/line locations.")
COMMON_FLAG(
const char *, external_symbolizer_path, nullptr,
"Path to external symbolizer. If empty, the tool will search $PATH for "
"the symbolizer.")
COMMON_FLAG(
bool, allow_addr2line, false,
"If set, allows online symbolizer to run addr2line binary to symbolize "
"stack traces (addr2line will only be used if llvm-symbolizer binary is "
"unavailable.")
COMMON_FLAG(const char *, strip_path_prefix, "",
"Strips this prefix from file paths in error reports.")
COMMON_FLAG(bool, fast_unwind_on_check, false,
"If available, use the fast frame-pointer-based unwinder on "
"internal CHECK failures.")
COMMON_FLAG(bool, fast_unwind_on_fatal, false,
"If available, use the fast frame-pointer-based unwinder on fatal "
"errors.")
COMMON_FLAG(bool, fast_unwind_on_malloc, true,
"If available, use the fast frame-pointer-based unwinder on "
"malloc/free.")
COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
COMMON_FLAG(int, malloc_context_size, 1,
"Max number of stack frames kept for each allocation/deallocation.")
COMMON_FLAG(
const char *, log_path, "stderr",
"Write logs to \"log_path.pid\". The special values are \"stdout\" and "
"\"stderr\". The default is \"stderr\".")
COMMON_FLAG(
bool, log_exe_name, false,
"Mention name of executable when reporting error and "
"append executable name to logs (as in \"log_path.exe_name.pid\").")
COMMON_FLAG(
bool, log_to_syslog, SANITIZER_ANDROID,
"Write all sanitizer output to syslog in addition to other means of "
"logging.")
COMMON_FLAG(
int, verbosity, 0,
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
COMMON_FLAG(bool, detect_leaks, true, "Enable memory leak detection.")
COMMON_FLAG(
bool, leak_check_at_exit, true,
"Invoke leak checking in an atexit handler. Has no effect if "
"detect_leaks=false, or if __lsan_do_leak_check() is called before the "
"handler has a chance to run.")
COMMON_FLAG(bool, allocator_may_return_null, false,
"If false, the allocator will crash instead of returning 0 on "
"out-of-memory.")
COMMON_FLAG(bool, print_summary, true,
"If false, disable printing error summaries in addition to error "
"reports.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV,
"If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
COMMON_FLAG(bool, handle_abort, false,
"If set, registers the tool's custom SIGABRT handler.")
COMMON_FLAG(bool, handle_sigfpe, true,
"If set, registers the tool's custom SIGFPE handler.")
COMMON_FLAG(bool, allow_user_segv_handler, false,
"If set, allows user to register a SEGV handler even if the tool "
"registers one.")
COMMON_FLAG(bool, use_sigaltstack, true,
"If set, uses alternate stack for signal handling.")
COMMON_FLAG(bool, detect_deadlocks, false,
"If set, deadlock detection is enabled.")
COMMON_FLAG(
uptr, clear_shadow_mmap_threshold, 64 * 1024,
"Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
"memset(). This is the threshold size in bytes.")
COMMON_FLAG(const char *, color, "auto",
"Colorize reports: (always|never|auto).")
COMMON_FLAG(
bool, legacy_pthread_cond, false,
"Enables support for dynamic libraries linked with libpthread 2.2.5.")
COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.")
COMMON_FLAG(bool, help, false, "Print the flag descriptions.")
COMMON_FLAG(uptr, mmap_limit_mb, 0,
"Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
"not a user-facing flag, used mosly for testing the tools")
COMMON_FLAG(uptr, hard_rss_limit_mb, 0,
"Hard RSS limit in Mb."
" If non-zero, a background thread is spawned at startup"
" which periodically reads RSS and aborts the process if the"
" limit is reached")
COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
"Soft RSS limit in Mb."
" If non-zero, a background thread is spawned at startup"
" which periodically reads RSS. If the limit is reached"
" all subsequent malloc/new calls will fail or return NULL"
" (depending on the value of allocator_may_return_null)"
" until the RSS goes below the soft limit."
" This limit does not affect memory allocations other than"
" malloc/new.")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
"If false, do not attempt to read /proc/maps/statm."
" Mostly useful for testing sanitizers.")
COMMON_FLAG(
bool, coverage, false,
"If set, coverage information will be dumped at program shutdown (if the "
"coverage instrumentation was enabled at compile time).")
COMMON_FLAG(bool, coverage_pcs, true,
"If set (and if 'coverage' is set too), the coverage information "
"will be dumped as a set of PC offsets for every module.")
COMMON_FLAG(bool, coverage_order_pcs, false,
"If true, the PCs will be dumped in the order they've"
" appeared during the execution.")
COMMON_FLAG(bool, coverage_bitset, false,
"If set (and if 'coverage' is set too), the coverage information "
"will also be dumped as a bitset to a separate file.")
COMMON_FLAG(bool, coverage_counters, false,
"If set (and if 'coverage' is set too), the bitmap that corresponds"
" to coverage counters will be dumped.")
COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
"If set, coverage information will be dumped directly to a memory "
"mapped file. This way data is not lost even if the process is "
"suddenly killed.")
COMMON_FLAG(const char *, coverage_dir, ".",
"Target directory for coverage dumps. Defaults to the current "
"directory.")
COMMON_FLAG(bool, full_address_space, false,
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized")
COMMON_FLAG(bool, print_suppressions, true,
"Print matched suppressions at exit.")
COMMON_FLAG(
bool, disable_coredump, (SANITIZER_WORDSIZE == 64),
"Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
"dumping a 16T+ core file. Ignored on OSes that don't dump core by"
"default and for sanitizers that don't reserve lots of virtual memory.")
COMMON_FLAG(bool, use_madv_dontdump, true,
"If set, instructs kernel to not store the (huge) shadow "
"in core file.")
COMMON_FLAG(bool, symbolize_inline_frames, true,
"Print inlined frames in stacktraces. Defaults to true.")
COMMON_FLAG(bool, symbolize_vs_style, false,
"Print file locations in Visual Studio style (e.g: "
" file(10,42): ...")
COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
"Use DEFAULT to get default format.")
COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
"If true, the shadow is not allowed to use huge pages. ")
COMMON_FLAG(bool, strict_string_checks, false,
"If set check that string arguments are properly null-terminated")
COMMON_FLAG(bool, intercept_strstr, true,
"If set, uses custom wrappers for strstr and strcasestr functions "
"to find more errors.")
COMMON_FLAG(bool, intercept_strspn, true,
"If set, uses custom wrappers for strspn and strcspn function "
"to find more errors.")
COMMON_FLAG(bool, intercept_strpbrk, true,
"If set, uses custom wrappers for strpbrk function "
"to find more errors.")
COMMON_FLAG(bool, intercept_memcmp, true,
"If set, uses custom wrappers for memcmp function "
"to find more errors.")
COMMON_FLAG(bool, strict_memcmp, true,
"If true, assume that memcmp(p1, p2, n) always reads n bytes before "
"comparing p1 and p2.")
COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer "
"mappings in /proc/self/maps with "
"user-readable names")
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
"found an error")
COMMON_FLAG(
bool, abort_on_error, SANITIZER_MAC,
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.")

View File

@ -1,23 +0,0 @@
//===-- sanitizer_interception.h --------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Common macro definitions for interceptors.
// Always use this headers instead of interception/interception.h.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_INTERCEPTION_H
#define SANITIZER_INTERCEPTION_H
#include "interception/interception.h"
#include "sanitizer_common.h"
#if SANITIZER_LINUX && !defined(SANITIZER_GO)
#undef REAL
#define REAL(x) IndirectExternCall(__interception::PTR_TO_REAL(x))
#endif
#endif // SANITIZER_INTERCEPTION_H

View File

@ -0,0 +1,56 @@
//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// This header declares the sanitizer runtime interface functions.
// The runtime library has to define these functions so the instrumented program
// could call them.
//
// See also include/sanitizer/common_interface_defs.h
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_INTERFACE_INTERNAL_H
#define SANITIZER_INTERFACE_INTERNAL_H
#include "sanitizer_internal_defs.h"
extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
typedef struct {
int coverage_sandboxed;
__sanitizer::sptr coverage_fd;
unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;
// Notify the tools that the sandbox is going to be turned on.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
// the error message. This function can be overridden by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
const void *end);
} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H

View File

@ -13,6 +13,10 @@
#include "sanitizer_platform.h" #include "sanitizer_platform.h"
#ifndef SANITIZER_DEBUG
# define SANITIZER_DEBUG 0
#endif
// Only use SANITIZER_*ATTRIBUTE* before the function return type! // Only use SANITIZER_*ATTRIBUTE* before the function return type!
#if SANITIZER_WINDOWS #if SANITIZER_WINDOWS
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport) # define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
@ -26,7 +30,7 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak)) # define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif #endif
#if SANITIZER_LINUX && !defined(SANITIZER_GO) #if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !defined(SANITIZER_GO)
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1 # define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else #else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0 # define SANITIZER_SUPPORTS_WEAK_HOOKS 0
@ -74,13 +78,22 @@ typedef signed char s8;
typedef signed short s16; // NOLINT typedef signed short s16; // NOLINT
typedef signed int s32; typedef signed int s32;
typedef signed long long s64; // NOLINT typedef signed long long s64; // NOLINT
#if SANITIZER_WINDOWS
// On Windows, files are HANDLE, which is a synonim of void*.
// Use void* to avoid including <windows.h> everywhere.
typedef void* fd_t;
typedef unsigned error_t;
#else
typedef int fd_t; typedef int fd_t;
typedef int error_t;
#endif
// WARNING: OFF_T may be different from OS type off_t, depending on the value of // WARNING: OFF_T may be different from OS type off_t, depending on the value of
// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls // _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
// like pread and mmap, as opposed to pread64 and mmap64. // like pread and mmap, as opposed to pread64 and mmap64.
// Mac and Linux/x86-64 are special. // FreeBSD, Mac and Linux/x86-64 are special.
#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__)) #if SANITIZER_FREEBSD || SANITIZER_MAC || \
(SANITIZER_LINUX && defined(__x86_64__))
typedef u64 OFF_T; typedef u64 OFF_T;
#else #else
typedef uptr OFF_T; typedef uptr OFF_T;
@ -94,41 +107,6 @@ typedef u32 operator_new_size_type;
#endif #endif
} // namespace __sanitizer } // namespace __sanitizer
extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
typedef struct {
int coverage_sandboxed;
__sanitizer::sptr coverage_fd;
unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;
// Notify the tools that the sandbox is going to be turned on.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
// the error message. This function can be overridden by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov();
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
const void *end);
} // extern "C"
using namespace __sanitizer; // NOLINT using namespace __sanitizer; // NOLINT
// ----------- ATTENTION ------------- // ----------- ATTENTION -------------
@ -149,7 +127,6 @@ using namespace __sanitizer; // NOLINT
# define NOINLINE __declspec(noinline) # define NOINLINE __declspec(noinline)
# define NORETURN __declspec(noreturn) # define NORETURN __declspec(noreturn)
# define THREADLOCAL __declspec(thread) # define THREADLOCAL __declspec(thread)
# define NOTHROW
# define LIKELY(x) (x) # define LIKELY(x) (x)
# define UNLIKELY(x) (x) # define UNLIKELY(x) (x)
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ # define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
@ -163,7 +140,6 @@ using namespace __sanitizer; // NOLINT
# define NOINLINE __attribute__((noinline)) # define NOINLINE __attribute__((noinline))
# define NORETURN __attribute__((noreturn)) # define NORETURN __attribute__((noreturn))
# define THREADLOCAL __thread # define THREADLOCAL __thread
# define NOTHROW throw()
# define LIKELY(x) __builtin_expect(!!(x), 1) # define LIKELY(x) __builtin_expect(!!(x), 1)
# define UNLIKELY(x) __builtin_expect(!!(x), 0) # define UNLIKELY(x) __builtin_expect(!!(x), 0)
# if defined(__i386__) || defined(__x86_64__) # if defined(__i386__) || defined(__x86_64__)
@ -182,6 +158,12 @@ using namespace __sanitizer; // NOLINT
# define USED # define USED
#endif #endif
#if !defined(_MSC_VER) || defined(__clang__) || MSC_PREREQ(1900)
# define NOEXCEPT noexcept
#else
# define NOEXCEPT throw()
#endif
// Unaligned versions of basic types. // Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16; typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32; typedef ALIGNED(1) u32 uu32;
@ -238,7 +220,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
#define CHECK_GT(a, b) CHECK_IMPL((a), >, (b)) #define CHECK_GT(a, b) CHECK_IMPL((a), >, (b))
#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b)) #define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
#if TSAN_DEBUG #if SANITIZER_DEBUG
#define DCHECK(a) CHECK(a) #define DCHECK(a) CHECK(a)
#define DCHECK_EQ(a, b) CHECK_EQ(a, b) #define DCHECK_EQ(a, b) CHECK_EQ(a, b)
#define DCHECK_NE(a, b) CHECK_NE(a, b) #define DCHECK_NE(a, b) CHECK_NE(a, b)
@ -318,4 +300,12 @@ extern "C" void* _ReturnAddress(void);
} while (internal_iserror(res, &rverrno) && rverrno == EINTR); \ } while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
} }
// Forces the compiler to generate a frame pointer in the function.
#define ENABLE_FRAME_POINTER \
do { \
volatile uptr enable_fp; \
enable_fp = GET_CURRENT_FRAME(); \
(void)enable_fp; \
} while (0)
#endif // SANITIZER_DEFS_H #endif // SANITIZER_DEFS_H

View File

@ -47,8 +47,8 @@ struct LFStack {
u64 cmp = atomic_load(&head_, memory_order_acquire); u64 cmp = atomic_load(&head_, memory_order_acquire);
for (;;) { for (;;) {
T *cur = (T*)(uptr)(cmp & kPtrMask); T *cur = (T*)(uptr)(cmp & kPtrMask);
if (cur == 0) if (!cur)
return 0; return nullptr;
T *nxt = cur->next; T *nxt = cur->next;
u64 cnt = (cmp & kCounterMask); u64 cnt = (cmp & kCounterMask);
u64 xch = (u64)(uptr)nxt | cnt; u64 xch = (u64)(uptr)nxt | cnt;
@ -66,6 +66,6 @@ struct LFStack {
atomic_uint64_t head_; atomic_uint64_t head_;
}; };
} // namespace __sanitizer } // namespace __sanitizer
#endif // #ifndef SANITIZER_LFSTACK_H #endif // SANITIZER_LFSTACK_H

View File

@ -8,37 +8,37 @@
// This file is shared between AddressSanitizer and ThreadSanitizer // This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries. See sanitizer_libc.h for details. // run-time libraries. See sanitizer_libc.h for details.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h" #include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_libc.h" #include "sanitizer_libc.h"
namespace __sanitizer { namespace __sanitizer {
// Make the compiler think that something is going on there.
static inline void break_optimization(void *arg) {
#if _MSC_VER
// FIXME: make sure this is actually enough.
__asm;
#else
__asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
s64 internal_atoll(const char *nptr) { s64 internal_atoll(const char *nptr) {
return internal_simple_strtoll(nptr, (char**)0, 10); return internal_simple_strtoll(nptr, nullptr, 10);
} }
void *internal_memchr(const void *s, int c, uptr n) { void *internal_memchr(const void *s, int c, uptr n) {
const char* t = (char*)s; const char *t = (const char *)s;
for (uptr i = 0; i < n; ++i, ++t) for (uptr i = 0; i < n; ++i, ++t)
if (*t == c) if (*t == c)
return (void*)t; return reinterpret_cast<void *>(const_cast<char *>(t));
return 0; return nullptr;
}
void *internal_memrchr(const void *s, int c, uptr n) {
const char *t = (const char *)s;
void *res = nullptr;
for (uptr i = 0; i < n; ++i, ++t) {
if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t));
}
return res;
} }
int internal_memcmp(const void* s1, const void* s2, uptr n) { int internal_memcmp(const void* s1, const void* s2, uptr n) {
const char* t1 = (char*)s1; const char *t1 = (const char *)s1;
const char* t2 = (char*)s2; const char *t2 = (const char *)s2;
for (uptr i = 0; i < n; ++i, ++t1, ++t2) for (uptr i = 0; i < n; ++i, ++t1, ++t2)
if (*t1 != *t2) if (*t1 != *t2)
return *t1 < *t2 ? -1 : 1; return *t1 < *t2 ? -1 : 1;
@ -47,7 +47,7 @@ int internal_memcmp(const void* s1, const void* s2, uptr n) {
void *internal_memcpy(void *dest, const void *src, uptr n) { void *internal_memcpy(void *dest, const void *src, uptr n) {
char *d = (char*)dest; char *d = (char*)dest;
char *s = (char*)src; const char *s = (const char *)src;
for (uptr i = 0; i < n; ++i) for (uptr i = 0; i < n; ++i)
d[i] = s[i]; d[i] = s[i];
return dest; return dest;
@ -55,7 +55,7 @@ void *internal_memcpy(void *dest, const void *src, uptr n) {
void *internal_memmove(void *dest, const void *src, uptr n) { void *internal_memmove(void *dest, const void *src, uptr n) {
char *d = (char*)dest; char *d = (char*)dest;
char *s = (char*)src; const char *s = (const char *)src;
sptr i, signed_n = (sptr)n; sptr i, signed_n = (sptr)n;
CHECK_GE(signed_n, 0); CHECK_GE(signed_n, 0);
if (d < s) { if (d < s) {
@ -76,7 +76,8 @@ void internal_bzero_aligned16(void *s, uptr n) {
CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0); CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0);
for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) { for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) {
p->a = p->b = 0; p->a = p->b = 0;
break_optimization(0); // Make sure this does not become memset. // Make sure this does not become memset.
SanitizerBreakOptimization(nullptr);
} }
} }
@ -95,7 +96,7 @@ void *internal_memset(void* s, int c, uptr n) {
uptr internal_strcspn(const char *s, const char *reject) { uptr internal_strcspn(const char *s, const char *reject) {
uptr i; uptr i;
for (i = 0; s[i]; i++) { for (i = 0; s[i]; i++) {
if (internal_strchr(reject, s[i]) != 0) if (internal_strchr(reject, s[i]))
return i; return i;
} }
return i; return i;
@ -109,6 +110,14 @@ char* internal_strdup(const char *s) {
return s2; return s2;
} }
char* internal_strndup(const char *s, uptr n) {
uptr len = internal_strnlen(s, n);
char *s2 = (char*)InternalAlloc(len + 1);
internal_memcpy(s2, s, len);
s2[len] = 0;
return s2;
}
int internal_strcmp(const char *s1, const char *s2) { int internal_strcmp(const char *s1, const char *s2) {
while (true) { while (true) {
unsigned c1 = *s1; unsigned c1 = *s1;
@ -136,9 +145,9 @@ int internal_strncmp(const char *s1, const char *s2, uptr n) {
char* internal_strchr(const char *s, int c) { char* internal_strchr(const char *s, int c) {
while (true) { while (true) {
if (*s == (char)c) if (*s == (char)c)
return (char*)s; return const_cast<char *>(s);
if (*s == 0) if (*s == 0)
return 0; return nullptr;
s++; s++;
} }
} }
@ -146,16 +155,16 @@ char* internal_strchr(const char *s, int c) {
char *internal_strchrnul(const char *s, int c) { char *internal_strchrnul(const char *s, int c) {
char *res = internal_strchr(s, c); char *res = internal_strchr(s, c);
if (!res) if (!res)
res = (char*)s + internal_strlen(s); res = const_cast<char *>(s) + internal_strlen(s);
return res; return res;
} }
char *internal_strrchr(const char *s, int c) { char *internal_strrchr(const char *s, int c) {
const char *res = 0; const char *res = nullptr;
for (uptr i = 0; s[i]; i++) { for (uptr i = 0; s[i]; i++) {
if (s[i] == c) res = s + i; if (s[i] == c) res = s + i;
} }
return (char*)res; return const_cast<char *>(res);
} }
uptr internal_strlen(const char *s) { uptr internal_strlen(const char *s) {
@ -191,12 +200,12 @@ char *internal_strstr(const char *haystack, const char *needle) {
// This is O(N^2), but we are not using it in hot places. // This is O(N^2), but we are not using it in hot places.
uptr len1 = internal_strlen(haystack); uptr len1 = internal_strlen(haystack);
uptr len2 = internal_strlen(needle); uptr len2 = internal_strlen(needle);
if (len1 < len2) return 0; if (len1 < len2) return nullptr;
for (uptr pos = 0; pos <= len1 - len2; pos++) { for (uptr pos = 0; pos <= len1 - len2; pos++) {
if (internal_memcmp(haystack + pos, needle, len2) == 0) if (internal_memcmp(haystack + pos, needle, len2) == 0)
return (char*)haystack + pos; return const_cast<char *>(haystack) + pos;
} }
return 0; return nullptr;
} }
s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) { s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
@ -205,7 +214,7 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
int sgn = 1; int sgn = 1;
u64 res = 0; u64 res = 0;
bool have_digits = false; bool have_digits = false;
char *old_nptr = (char*)nptr; char *old_nptr = const_cast<char *>(nptr);
if (*nptr == '+') { if (*nptr == '+') {
sgn = 1; sgn = 1;
nptr++; nptr++;
@ -220,8 +229,8 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
have_digits = true; have_digits = true;
nptr++; nptr++;
} }
if (endptr != 0) { if (endptr) {
*endptr = (have_digits) ? (char*)nptr : old_nptr; *endptr = (have_digits) ? const_cast<char *>(nptr) : old_nptr;
} }
if (sgn > 0) { if (sgn > 0) {
return (s64)(Min((u64)INT64_MAX, res)); return (s64)(Min((u64)INT64_MAX, res));
@ -249,4 +258,4 @@ bool mem_is_zero(const char *beg, uptr size) {
return all == 0; return all == 0;
} }
} // namespace __sanitizer } // namespace __sanitizer

View File

@ -9,7 +9,9 @@
// run-time libraries. // run-time libraries.
// These tools can not use some of the libc functions directly because those // These tools can not use some of the libc functions directly because those
// functions are intercepted. Instead, we implement a tiny subset of libc here. // functions are intercepted. Instead, we implement a tiny subset of libc here.
// FIXME: Some of functions declared in this file are in fact POSIX, not libc.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifndef SANITIZER_LIBC_H #ifndef SANITIZER_LIBC_H
#define SANITIZER_LIBC_H #define SANITIZER_LIBC_H
@ -24,6 +26,7 @@ namespace __sanitizer {
// String functions // String functions
s64 internal_atoll(const char *nptr); s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n); void *internal_memchr(const void *s, int c, uptr n);
void *internal_memrchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n); int internal_memcmp(const void* s1, const void* s2, uptr n);
void *internal_memcpy(void *dest, const void *src, uptr n); void *internal_memcpy(void *dest, const void *src, uptr n);
void *internal_memmove(void *dest, const void *src, uptr n); void *internal_memmove(void *dest, const void *src, uptr n);
@ -36,6 +39,7 @@ char *internal_strchrnul(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2); int internal_strcmp(const char *s1, const char *s2);
uptr internal_strcspn(const char *s, const char *reject); uptr internal_strcspn(const char *s, const char *reject);
char *internal_strdup(const char *s); char *internal_strdup(const char *s);
char *internal_strndup(const char *s, uptr n);
uptr internal_strlen(const char *s); uptr internal_strlen(const char *s);
char *internal_strncat(char *dst, const char *src, uptr n); char *internal_strncat(char *dst, const char *src, uptr n);
int internal_strncmp(const char *s1, const char *s2, uptr n); int internal_strncmp(const char *s1, const char *s2, uptr n);
@ -52,55 +56,26 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...);
// Optimized for the case when the result is true. // Optimized for the case when the result is true.
bool mem_is_zero(const char *mem, uptr size); bool mem_is_zero(const char *mem, uptr size);
// Memory
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset);
uptr internal_munmap(void *addr, uptr length);
// I/O // I/O
const fd_t kInvalidFd = -1; const fd_t kInvalidFd = (fd_t)-1;
const fd_t kStdinFd = 0; const fd_t kStdinFd = 0;
const fd_t kStdoutFd = 1; const fd_t kStdoutFd = (fd_t)1;
const fd_t kStderrFd = 2; const fd_t kStderrFd = (fd_t)2;
uptr internal_close(fd_t fd);
int internal_isatty(fd_t fd);
// Use __sanitizer::OpenFile() instead.
uptr internal_open(const char *filename, int flags);
uptr internal_open(const char *filename, int flags, u32 mode);
uptr internal_read(fd_t fd, void *buf, uptr count);
uptr internal_write(fd_t fd, const void *buf, uptr count);
uptr internal_ftruncate(fd_t fd, uptr size); uptr internal_ftruncate(fd_t fd, uptr size);
// OS // OS
uptr internal_filesize(fd_t fd); // -1 on error.
uptr internal_stat(const char *path, void *buf);
uptr internal_lstat(const char *path, void *buf);
uptr internal_fstat(fd_t fd, void *buf);
uptr internal_dup2(int oldfd, int newfd);
uptr internal_readlink(const char *path, char *buf, uptr bufsize);
uptr internal_unlink(const char *path);
uptr internal_rename(const char *oldpath, const char *newpath);
void NORETURN internal__exit(int exitcode); void NORETURN internal__exit(int exitcode);
uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
uptr internal_ptrace(int request, int pid, void *addr, void *data);
uptr internal_waitpid(int pid, int *status, int options);
uptr internal_getpid(); uptr internal_getpid();
uptr internal_getppid(); uptr internal_getppid();
int internal_fork();
// Threading // Threading
uptr internal_sched_yield(); uptr internal_sched_yield();
// Error handling // Error handling
bool internal_iserror(uptr retval, int *rverrno = 0); bool internal_iserror(uptr retval, int *rverrno = nullptr);
int internal_sigaction(int signum, const void *act, void *oldact); } // namespace __sanitizer
} // namespace __sanitizer #endif // SANITIZER_LIBC_H
#endif // SANITIZER_LIBC_H

View File

@ -6,10 +6,12 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "sanitizer_platform.h" #include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX #if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_libignore.h" #include "sanitizer_libignore.h"
#include "sanitizer_flags.h" #include "sanitizer_flags.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h" #include "sanitizer_procmaps.h"
namespace __sanitizer { namespace __sanitizer {
@ -17,35 +19,29 @@ namespace __sanitizer {
LibIgnore::LibIgnore(LinkerInitialized) { LibIgnore::LibIgnore(LinkerInitialized) {
} }
void LibIgnore::Init(const SuppressionContext &supp) { void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
BlockingMutexLock lock(&mutex_); BlockingMutexLock lock(&mutex_);
CHECK_EQ(count_, 0); if (count_ >= kMaxLibs) {
const uptr n = supp.SuppressionCount(); Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
for (uptr i = 0; i < n; i++) { kMaxLibs);
const Suppression *s = supp.SuppressionAt(i); Die();
if (s->type != SuppressionLib)
continue;
if (count_ >= kMaxLibs) {
Report("%s: too many called_from_lib suppressions (max: %d)\n",
SanitizerToolName, kMaxLibs);
Die();
}
Lib *lib = &libs_[count_++];
lib->templ = internal_strdup(s->templ);
lib->name = 0;
lib->loaded = false;
} }
Lib *lib = &libs_[count_++];
lib->templ = internal_strdup(name_templ);
lib->name = nullptr;
lib->real_name = nullptr;
lib->loaded = false;
} }
void LibIgnore::OnLibraryLoaded(const char *name) { void LibIgnore::OnLibraryLoaded(const char *name) {
BlockingMutexLock lock(&mutex_); BlockingMutexLock lock(&mutex_);
// Try to match suppressions with symlink target. // Try to match suppressions with symlink target.
InternalScopedBuffer<char> buf(4096); InternalScopedString buf(kMaxPathLength);
if (name != 0 && internal_readlink(name, buf.data(), buf.size() - 1) > 0 && if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
buf.data()[0]) { buf[0]) {
for (uptr i = 0; i < count_; i++) { for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i]; Lib *lib = &libs_[i];
if (!lib->loaded && lib->real_name == 0 && if (!lib->loaded && (!lib->real_name) &&
TemplateMatch(lib->templ, name)) TemplateMatch(lib->templ, name))
lib->real_name = internal_strdup(buf.data()); lib->real_name = internal_strdup(buf.data());
} }
@ -53,7 +49,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
// Scan suppressions list and find newly loaded and unloaded libraries. // Scan suppressions list and find newly loaded and unloaded libraries.
MemoryMappingLayout proc_maps(/*cache_enabled*/false); MemoryMappingLayout proc_maps(/*cache_enabled*/false);
InternalScopedBuffer<char> module(4096); InternalScopedString module(kMaxPathLength);
for (uptr i = 0; i < count_; i++) { for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i]; Lib *lib = &libs_[i];
bool loaded = false; bool loaded = false;
@ -63,7 +59,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
if ((prot & MemoryMappingLayout::kProtectionExecute) == 0) if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
continue; continue;
if (TemplateMatch(lib->templ, module.data()) || if (TemplateMatch(lib->templ, module.data()) ||
(lib->real_name != 0 && (lib->real_name &&
internal_strcmp(lib->real_name, module.data()) == 0)) { internal_strcmp(lib->real_name, module.data()) == 0)) {
if (loaded) { if (loaded) {
Report("%s: called_from_lib suppression '%s' is matched against" Report("%s: called_from_lib suppression '%s' is matched against"
@ -96,9 +92,9 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
} }
void LibIgnore::OnLibraryUnloaded() { void LibIgnore::OnLibraryUnloaded() {
OnLibraryLoaded(0); OnLibraryLoaded(nullptr);
} }
} // namespace __sanitizer } // namespace __sanitizer
#endif // #if SANITIZER_FREEBSD || SANITIZER_LINUX #endif // #if SANITIZER_FREEBSD || SANITIZER_LINUX

View File

@ -6,8 +6,8 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// //
// LibIgnore allows to ignore all interceptors called from a particular set // LibIgnore allows to ignore all interceptors called from a particular set
// of dynamic libraries. LibIgnore remembers all "called_from_lib" suppressions // of dynamic libraries. LibIgnore can be initialized with several templates
// from the provided SuppressionContext; finds code ranges for the libraries; // of names of libraries to be ignored. It finds code ranges for the libraries;
// and checks whether the provided PC value belongs to the code ranges. // and checks whether the provided PC value belongs to the code ranges.
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -17,7 +17,6 @@
#include "sanitizer_internal_defs.h" #include "sanitizer_internal_defs.h"
#include "sanitizer_common.h" #include "sanitizer_common.h"
#include "sanitizer_suppressions.h"
#include "sanitizer_atomic.h" #include "sanitizer_atomic.h"
#include "sanitizer_mutex.h" #include "sanitizer_mutex.h"
@ -27,8 +26,8 @@ class LibIgnore {
public: public:
explicit LibIgnore(LinkerInitialized); explicit LibIgnore(LinkerInitialized);
// Fetches all "called_from_lib" suppressions from the SuppressionContext. // Must be called during initialization.
void Init(const SuppressionContext &supp); void AddIgnoredLibrary(const char *name_templ);
// Must be called after a new dynamic library is loaded. // Must be called after a new dynamic library is loaded.
void OnLibraryLoaded(const char *name); void OnLibraryLoaded(const char *name);

Some files were not shown because too many files have changed in this diff Show More