mirror of git://gcc.gnu.org/git/gcc.git
libsanitizer merge from upstream r171973
From-SVN: r195083
commit e9772e16b3
parent e1f674e4c2
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2013-01-10  Kostya Serebryany  <kcc@google.com>
+
+        * g++.dg/asan/asan_test.cc: Sync from upstream.
+
 2013-01-10  Jakub Jelinek  <jakub@redhat.com>
 
         PR tree-optimization/55921
--- a/gcc/testsuite/g++.dg/asan/asan_test.cc
+++ b/gcc/testsuite/g++.dg/asan/asan_test.cc
@@ -17,6 +17,15 @@
 #include <stdint.h>
 #include <setjmp.h>
 #include <assert.h>
+#include <algorithm>
+
+#ifdef __linux__
+# include <sys/prctl.h>
+# include <sys/types.h>
+# include <sys/stat.h>
+# include <fcntl.h>
+#include <unistd.h>
+#endif
 
 #if defined(__i386__) || defined(__x86_64__)
 #include <emmintrin.h>
@@ -242,7 +251,7 @@ void OOBTest() {
   for (int i = 0; i < (int)(size - sizeof(T) + 1); i++)
     oob_test<T>(size, i);
 
-  for (int i = size - sizeof(T) + 1; i <= (int)(size + 3 * sizeof(T)); i++) {
+  for (int i = size - sizeof(T) + 1; i <= (int)(size + 2 * sizeof(T)); i++) {
     const char *str =
         "is located.*%d byte.*to the right";
     int off = i >= size ? (i - size) : 0;
@@ -298,6 +307,18 @@ TEST(AddressSanitizer, OOBRightTest) {
   }
 }
 
+#if ASAN_ALLOCATOR_VERSION == 2 // Broken with the asan_allocator1
+TEST(AddressSanitizer, LargeOOBRightTest) {
+  size_t large_power_of_two = 1 << 19;
+  for (size_t i = 16; i <= 256; i *= 2) {
+    size_t size = large_power_of_two - i;
+    char *p = Ident(new char[size]);
+    EXPECT_DEATH(p[size] = 0, "is located 0 bytes to the right");
+    delete [] p;
+  }
+}
+#endif // ASAN_ALLOCATOR_VERSION == 2
+
 TEST(AddressSanitizer, UAF_char) {
   const char *uaf_string = "AddressSanitizer:.*heap-use-after-free";
   EXPECT_DEATH(uaf_test<U1>(1, 0), uaf_string);
@@ -456,6 +477,24 @@ TEST(AddressSanitizer, HugeMallocTest) {
 }
 #endif
 
+#ifndef __APPLE__
+void MemalignRun(size_t align, size_t size, int idx) {
+  char *p = (char *)memalign(align, size);
+  Ident(p)[idx] = 0;
+  free(p);
+}
+
+TEST(AddressSanitizer, memalign) {
+  for (int align = 16; align <= (1 << 23); align *= 2) {
+    size_t size = align * 5;
+    EXPECT_DEATH(MemalignRun(align, size, -1),
+                 "is located 1 bytes to the left");
+    EXPECT_DEATH(MemalignRun(align, size, size + 1),
+                 "is located 1 bytes to the right");
+  }
+}
+#endif
+
 TEST(AddressSanitizer, ThreadedMallocStressTest) {
   const int kNumThreads = 4;
   const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
@@ -784,14 +823,39 @@ TEST(AddressSanitizer, Store128Test) {
 }
 #endif
 
-static string RightOOBErrorMessage(int oob_distance) {
+static string RightOOBErrorMessage(int oob_distance, bool is_write) {
   assert(oob_distance >= 0);
   char expected_str[100];
-  sprintf(expected_str, "located %d bytes to the right", oob_distance);
+  sprintf(expected_str, ASAN_PCRE_DOTALL "%s.*located %d bytes to the right",
+          is_write ? "WRITE" : "READ", oob_distance);
   return string(expected_str);
 }
 
-static string LeftOOBErrorMessage(int oob_distance) {
+static string RightOOBWriteMessage(int oob_distance) {
+  return RightOOBErrorMessage(oob_distance, /*is_write*/true);
+}
+
+static string RightOOBReadMessage(int oob_distance) {
+  return RightOOBErrorMessage(oob_distance, /*is_write*/false);
+}
+
+static string LeftOOBErrorMessage(int oob_distance, bool is_write) {
+  assert(oob_distance > 0);
+  char expected_str[100];
+  sprintf(expected_str, ASAN_PCRE_DOTALL "%s.*located %d bytes to the left",
+          is_write ? "WRITE" : "READ", oob_distance);
+  return string(expected_str);
+}
+
+static string LeftOOBWriteMessage(int oob_distance) {
+  return LeftOOBErrorMessage(oob_distance, /*is_write*/true);
+}
+
+static string LeftOOBReadMessage(int oob_distance) {
+  return LeftOOBErrorMessage(oob_distance, /*is_write*/false);
+}
+
+static string LeftOOBAccessMessage(int oob_distance) {
   assert(oob_distance > 0);
   char expected_str[100];
   sprintf(expected_str, "located %d bytes to the left", oob_distance);
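
This hunk splits the old direction-only matchers into read/write variants: the ASAN_PCRE_DOTALL prefix lets the `.*` in the pattern cross line boundaries (on platforms where it expands to a DOTALL regex flag), so one expression can require both the access type printed on the report's first line and the offset printed on the location line. A rough usage illustration (the 10-byte buffer is a made-up example, not taken from the test file):

    // RightOOBWriteMessage(0) builds a pattern roughly equivalent to
    //   "WRITE.*located 0 bytes to the right"
    // which matches a report shaped like:
    //   WRITE of size 1 at 0x... thread T0
    //   ...
    //   0x... is located 0 bytes to the right of 10-byte region ...
    char *p = Ident((char*)malloc(10));
    EXPECT_DEATH(p[10] = 0, RightOOBWriteMessage(0));  // one byte past the end
    free(p);
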
@@ -805,44 +869,48 @@ void MemSetOOBTestTemplate(size_t length) {
   T *array = Ident((T*)malloc(size));
   int element = Ident(42);
   int zero = Ident(0);
+  void *(*MEMSET)(void *s, int c, size_t n) = Ident(memset);
   // memset interval inside array
-  memset(array, element, size);
-  memset(array, element, size - 1);
-  memset(array + length - 1, element, sizeof(T));
-  memset(array, element, 1);
+  MEMSET(array, element, size);
+  MEMSET(array, element, size - 1);
+  MEMSET(array + length - 1, element, sizeof(T));
+  MEMSET(array, element, 1);
 
   // memset 0 bytes
-  memset(array - 10, element, zero);
-  memset(array - 1, element, zero);
-  memset(array, element, zero);
-  memset(array + length, 0, zero);
-  memset(array + length + 1, 0, zero);
+  MEMSET(array - 10, element, zero);
+  MEMSET(array - 1, element, zero);
+  MEMSET(array, element, zero);
+  MEMSET(array + length, 0, zero);
+  MEMSET(array + length + 1, 0, zero);
 
   // try to memset bytes to the right of array
-  EXPECT_DEATH(memset(array, 0, size + 1),
-               RightOOBErrorMessage(0));
-  EXPECT_DEATH(memset((char*)(array + length) - 1, element, 6),
-               RightOOBErrorMessage(4));
-  EXPECT_DEATH(memset(array + 1, element, size + sizeof(T)),
-               RightOOBErrorMessage(2 * sizeof(T) - 1));
+  EXPECT_DEATH(MEMSET(array, 0, size + 1),
+               RightOOBWriteMessage(0));
+  EXPECT_DEATH(MEMSET((char*)(array + length) - 1, element, 6),
+               RightOOBWriteMessage(0));
+  EXPECT_DEATH(MEMSET(array + 1, element, size + sizeof(T)),
+               RightOOBWriteMessage(0));
   // whole interval is to the right
-  EXPECT_DEATH(memset(array + length + 1, 0, 10),
-               RightOOBErrorMessage(sizeof(T)));
+  EXPECT_DEATH(MEMSET(array + length + 1, 0, 10),
+               RightOOBWriteMessage(sizeof(T)));
 
   // try to memset bytes to the left of array
-  EXPECT_DEATH(memset((char*)array - 1, element, size),
-               LeftOOBErrorMessage(1));
-  EXPECT_DEATH(memset((char*)array - 5, 0, 6),
-               LeftOOBErrorMessage(5));
-  EXPECT_DEATH(memset(array - 5, element, size + 5 * sizeof(T)),
-               LeftOOBErrorMessage(5 * sizeof(T)));
+  EXPECT_DEATH(MEMSET((char*)array - 1, element, size),
+               LeftOOBWriteMessage(1));
+  EXPECT_DEATH(MEMSET((char*)array - 5, 0, 6),
+               LeftOOBWriteMessage(5));
+  if (length >= 100) {
+    // Large OOB, we find it only if the redzone is large enough.
+    EXPECT_DEATH(memset(array - 5, element, size + 5 * sizeof(T)),
+                 LeftOOBWriteMessage(5 * sizeof(T)));
+  }
   // whole interval is to the left
-  EXPECT_DEATH(memset(array - 2, 0, sizeof(T)),
-               LeftOOBErrorMessage(2 * sizeof(T)));
+  EXPECT_DEATH(MEMSET(array - 2, 0, sizeof(T)),
+               LeftOOBWriteMessage(2 * sizeof(T)));
 
   // try to memset bytes both to the left & to the right
-  EXPECT_DEATH(memset((char*)array - 2, element, size + 4),
-               LeftOOBErrorMessage(2));
+  EXPECT_DEATH(MEMSET((char*)array - 2, element, size + 4),
+               LeftOOBWriteMessage(2));
 
   free(array);
 }
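
The template now issues every memset through a function pointer laundered by Ident(). The point is to keep the compiler from recognizing the call and expanding it as an inline intrinsic, which would bypass ASan's memset interceptor. A minimal sketch of the mechanism (Ident itself is defined elsewhere in the test harness; HiddenIdentity below is a hypothetical stand-in):

    // Hypothetical stand-in for the harness's Ident(): an identity function
    // the optimizer cannot see through, so the pointer's origin is hidden.
    template <class T>
    __attribute__((noinline)) T HiddenIdentity(T t) { return t; }

    void Demo(char *buf, size_t n) {
      void *(*MEMSET)(void *, int, size_t) = HiddenIdentity(memset);
      MEMSET(buf, 0, n);  // resolved at run time -> goes through the interceptor
    }
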
@@ -854,6 +922,51 @@ TEST(AddressSanitizer, MemSetOOBTest) {
   // We can test arrays of structres/classes here, but what for?
 }
 
+// Try to allocate two arrays of 'size' bytes that are near each other.
+// Strictly speaking we are not guaranteed to find such two pointers,
+// but given the structure of asan's allocator we will.
+static bool AllocateTwoAdjacentArrays(char **x1, char **x2, size_t size) {
+  vector<char *> v;
+  bool res = false;
+  for (size_t i = 0; i < 1000U && !res; i++) {
+    v.push_back(new char[size]);
+    if (i == 0) continue;
+    sort(v.begin(), v.end());
+    for (size_t j = 1; j < v.size(); j++) {
+      assert(v[j] > v[j-1]);
+      if ((size_t)(v[j] - v[j-1]) < size * 2) {
+        *x2 = v[j];
+        *x1 = v[j-1];
+        res = true;
+        break;
+      }
+    }
+  }
+
+  for (size_t i = 0; i < v.size(); i++) {
+    if (res && v[i] == *x1) continue;
+    if (res && v[i] == *x2) continue;
+    delete [] v[i];
+  }
+  return res;
+}
+
+TEST(AddressSanitizer, LargeOOBInMemset) {
+  for (size_t size = 200; size < 100000; size += size / 2) {
+    char *x1, *x2;
+    if (!Ident(AllocateTwoAdjacentArrays)(&x1, &x2, size))
+      continue;
+    // fprintf(stderr, "  large oob memset: %p %p %zd\n", x1, x2, size);
+    // Do a memset on x1 with huge out-of-bound access that will end up in x2.
+    EXPECT_DEATH(Ident(memset)(x1, 0, size * 2),
+                 "is located 0 bytes to the right");
+    delete [] x1;
+    delete [] x2;
+    return;
+  }
+  assert(0 && "Did not find two adjacent malloc-ed pointers");
+}
+
 // Same test for memcpy and memmove functions
 template <typename T, class M>
 void MemTransferOOBTestTemplate(size_t length) {
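
As the comments say, AllocateTwoAdjacentArrays is only a heuristic: it leans on the allocator packing same-sized chunks close together and reports failure otherwise. A usage sketch of the contract the new test relies on (the size 512 is an arbitrary example):

    char *x1, *x2;
    if (AllocateTwoAdjacentArrays(&x1, &x2, 512)) {
      // On success x2 - x1 < 2 * 512, so a memset of 2 * 512 bytes starting
      // at x1 overruns into x1's redzone (and beyond, toward x2); ASan traps
      // at the first poisoned byte and reports "0 bytes to the right".
      delete [] x1;
      delete [] x2;
    }
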
@@ -877,27 +990,27 @@ void MemTransferOOBTestTemplate(size_t length) {
 
   // try to change mem to the right of dest
   EXPECT_DEATH(M::transfer(dest + 1, src, size),
-               RightOOBErrorMessage(sizeof(T) - 1));
+               RightOOBWriteMessage(0));
   EXPECT_DEATH(M::transfer((char*)(dest + length) - 1, src, 5),
-               RightOOBErrorMessage(3));
+               RightOOBWriteMessage(0));
 
   // try to change mem to the left of dest
   EXPECT_DEATH(M::transfer(dest - 2, src, size),
-               LeftOOBErrorMessage(2 * sizeof(T)));
+               LeftOOBWriteMessage(2 * sizeof(T)));
   EXPECT_DEATH(M::transfer((char*)dest - 3, src, 4),
-               LeftOOBErrorMessage(3));
+               LeftOOBWriteMessage(3));
 
   // try to access mem to the right of src
   EXPECT_DEATH(M::transfer(dest, src + 2, size),
-               RightOOBErrorMessage(2 * sizeof(T) - 1));
+               RightOOBReadMessage(0));
   EXPECT_DEATH(M::transfer(dest, (char*)(src + length) - 3, 6),
-               RightOOBErrorMessage(2));
+               RightOOBReadMessage(0));
 
   // try to access mem to the left of src
   EXPECT_DEATH(M::transfer(dest, src - 1, size),
-               LeftOOBErrorMessage(sizeof(T)));
+               LeftOOBReadMessage(sizeof(T)));
   EXPECT_DEATH(M::transfer(dest, (char*)src - 6, 7),
-               LeftOOBErrorMessage(6));
+               LeftOOBReadMessage(6));
 
   // Generally we don't need to test cases where both accessing src and writing
   // to dest address to poisoned memory.
@@ -906,10 +1019,10 @@ void MemTransferOOBTestTemplate(size_t length) {
   T *big_dest = Ident((T*)malloc(size * 2));
   // try to change mem to both sides of dest
   EXPECT_DEATH(M::transfer(dest - 1, big_src, size * 2),
-               LeftOOBErrorMessage(sizeof(T)));
+               LeftOOBWriteMessage(sizeof(T)));
   // try to access mem to both sides of src
   EXPECT_DEATH(M::transfer(big_dest, src - 2, size * 2),
-               LeftOOBErrorMessage(2 * sizeof(T)));
+               LeftOOBReadMessage(2 * sizeof(T)));
 
   free(src);
   free(dest);
@@ -920,7 +1033,7 @@ void MemTransferOOBTestTemplate(size_t length) {
 class MemCpyWrapper {
  public:
   static void* transfer(void *to, const void *from, size_t size) {
-    return memcpy(to, from, size);
+    return Ident(memcpy)(to, from, size);
   }
 };
 TEST(AddressSanitizer, MemCpyOOBTest) {
@@ -931,7 +1044,7 @@ TEST(AddressSanitizer, MemCpyOOBTest) {
 class MemMoveWrapper {
  public:
   static void* transfer(void *to, const void *from, size_t size) {
-    return memmove(to, from, size);
+    return Ident(memmove)(to, from, size);
   }
 };
 TEST(AddressSanitizer, MemMoveOOBTest) {
@@ -958,15 +1071,15 @@ void StrLenOOBTestTemplate(char *str, size_t length, bool is_global) {
   // Arg of strlen is not malloced, OOB access
   if (!is_global) {
     // We don't insert RedZones to the left of global variables
-    EXPECT_DEATH(Ident(strlen(str - 1)), LeftOOBErrorMessage(1));
-    EXPECT_DEATH(Ident(strlen(str - 5)), LeftOOBErrorMessage(5));
+    EXPECT_DEATH(Ident(strlen(str - 1)), LeftOOBReadMessage(1));
+    EXPECT_DEATH(Ident(strlen(str - 5)), LeftOOBReadMessage(5));
   }
-  EXPECT_DEATH(Ident(strlen(str + length + 1)), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(strlen(str + length + 1)), RightOOBReadMessage(0));
   // Overwrite terminator
   str[length] = 'a';
   // String is not zero-terminated, strlen will lead to OOB access
-  EXPECT_DEATH(Ident(strlen(str)), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Ident(strlen(str + length)), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(strlen(str)), RightOOBReadMessage(0));
+  EXPECT_DEATH(Ident(strlen(str + length)), RightOOBReadMessage(0));
   // Restore terminator
   str[length] = 0;
 }
@@ -1010,11 +1123,11 @@ TEST(AddressSanitizer, StrNLenOOBTest) {
   str[size - 1] = '\0';
   Ident(strnlen(str, 2 * size));
   // Argument points to not allocated memory.
-  EXPECT_DEATH(Ident(strnlen(str - 1, 1)), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(strnlen(str + size, 1)), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(strnlen(str - 1, 1)), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(strnlen(str + size, 1)), RightOOBReadMessage(0));
   // Overwrite the terminating '\0' and hit unallocated memory.
   str[size - 1] = 'z';
-  EXPECT_DEATH(Ident(strnlen(str, size + 1)), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(strnlen(str, size + 1)), RightOOBReadMessage(0));
   free(str);
 }
 #endif
@@ -1030,11 +1143,11 @@ TEST(AddressSanitizer, StrDupOOBTest) {
   new_str = strdup(str + size - 1);
   free(new_str);
   // Argument points to not allocated memory.
-  EXPECT_DEATH(Ident(strdup(str - 1)), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(strdup(str + size)), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(strdup(str - 1)), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(strdup(str + size)), RightOOBReadMessage(0));
   // Overwrite the terminating '\0' and hit unallocated memory.
   str[size - 1] = 'z';
-  EXPECT_DEATH(Ident(strdup(str)), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(strdup(str)), RightOOBReadMessage(0));
   free(str);
 }
 
@@ -1048,15 +1161,15 @@ TEST(AddressSanitizer, StrCpyOOBTest) {
   strcpy(to, from);
   strcpy(to + to_size - from_size, from);
   // Length of "from" is too small.
-  EXPECT_DEATH(Ident(strcpy(from, "hello2")), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(strcpy(from, "hello2")), RightOOBWriteMessage(0));
   // "to" or "from" points to not allocated memory.
-  EXPECT_DEATH(Ident(strcpy(to - 1, from)), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(strcpy(to, from - 1)), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(strcpy(to, from + from_size)), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Ident(strcpy(to + to_size, from)), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(strcpy(to - 1, from)), LeftOOBWriteMessage(1));
+  EXPECT_DEATH(Ident(strcpy(to, from - 1)), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(strcpy(to, from + from_size)), RightOOBReadMessage(0));
+  EXPECT_DEATH(Ident(strcpy(to + to_size, from)), RightOOBWriteMessage(0));
   // Overwrite the terminating '\0' character and hit unallocated memory.
   from[from_size - 1] = '!';
-  EXPECT_DEATH(Ident(strcpy(to, from)), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(strcpy(to, from)), RightOOBReadMessage(0));
   free(to);
   free(from);
 }
@@ -1078,25 +1191,25 @@ TEST(AddressSanitizer, StrNCpyOOBTest) {
   strncpy(to + to_size - 1, from, 1);
   // One of {to, from} points to not allocated memory
   EXPECT_DEATH(Ident(strncpy(to, from - 1, from_size)),
-               LeftOOBErrorMessage(1));
+               LeftOOBReadMessage(1));
   EXPECT_DEATH(Ident(strncpy(to - 1, from, from_size)),
-               LeftOOBErrorMessage(1));
+               LeftOOBWriteMessage(1));
   EXPECT_DEATH(Ident(strncpy(to, from + from_size, 1)),
-               RightOOBErrorMessage(0));
+               RightOOBReadMessage(0));
   EXPECT_DEATH(Ident(strncpy(to + to_size, from, 1)),
-               RightOOBErrorMessage(0));
+               RightOOBWriteMessage(0));
   // Length of "to" is too small
   EXPECT_DEATH(Ident(strncpy(to + to_size - from_size + 1, from, from_size)),
-               RightOOBErrorMessage(0));
+               RightOOBWriteMessage(0));
   EXPECT_DEATH(Ident(strncpy(to + 1, from, to_size)),
-               RightOOBErrorMessage(0));
+               RightOOBWriteMessage(0));
   // Overwrite terminator in from
   from[from_size - 1] = '!';
   // normal strncpy call
   strncpy(to, from, from_size);
   // Length of "from" is too small
   EXPECT_DEATH(Ident(strncpy(to, from, to_size)),
-               RightOOBErrorMessage(0));
+               RightOOBReadMessage(0));
   free(to);
   free(from);
 }
@@ -1117,11 +1230,11 @@ USED static void RunStrChrTest(PointerToStrChr1 StrChr) {
   EXPECT_EQ(str + 10, StrChr(str, 'q'));
   EXPECT_EQ(NULL, StrChr(str, 'a'));
   // StrChr argument points to not allocated memory.
-  EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBReadMessage(0));
   // Overwrite the terminator and hit not allocated memory.
   str[11] = 'z';
-  EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBReadMessage(0));
   free(str);
 }
 USED static void RunStrChrTest(PointerToStrChr2 StrChr) {
@@ -1133,11 +1246,11 @@ USED static void RunStrChrTest(PointerToStrChr2 StrChr) {
   EXPECT_EQ(str + 10, StrChr(str, 'q'));
   EXPECT_EQ(NULL, StrChr(str, 'a'));
   // StrChr argument points to not allocated memory.
-  EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBReadMessage(0));
   // Overwrite the terminator and hit not allocated memory.
   str[11] = 'z';
-  EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBReadMessage(0));
   free(str);
 }
 
@@ -1198,8 +1311,9 @@ TEST(AddressSanitizer, StrCmpAndFriendsLogicTest) {
 typedef int(*PointerToStrCmp)(const char*, const char*);
 void RunStrCmpTest(PointerToStrCmp StrCmp) {
   size_t size = Ident(100);
-  char *s1 = MallocAndMemsetString(size);
-  char *s2 = MallocAndMemsetString(size);
+  int fill = 'o';
+  char *s1 = MallocAndMemsetString(size, fill);
+  char *s2 = MallocAndMemsetString(size, fill);
   s1[size - 1] = '\0';
   s2[size - 1] = '\0';
   // Normal StrCmp calls
@@ -1210,14 +1324,14 @@ void RunStrCmpTest(PointerToStrCmp StrCmp) {
   s2[size - 1] = 'x';
   Ident(StrCmp(s1, s2));
   // One of arguments points to not allocated memory.
-  EXPECT_DEATH(Ident(StrCmp)(s1 - 1, s2), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(StrCmp)(s1, s2 - 1), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(StrCmp)(s1 + size, s2), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Ident(StrCmp)(s1, s2 + size), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(StrCmp)(s1 - 1, s2), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(StrCmp)(s1, s2 - 1), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(StrCmp)(s1 + size, s2), RightOOBReadMessage(0));
+  EXPECT_DEATH(Ident(StrCmp)(s1, s2 + size), RightOOBReadMessage(0));
   // Hit unallocated memory and die.
-  s2[size - 1] = 'z';
-  EXPECT_DEATH(Ident(StrCmp)(s1, s1), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Ident(StrCmp)(s1 + size - 1, s2), RightOOBErrorMessage(0));
+  s1[size - 1] = fill;
+  EXPECT_DEATH(Ident(StrCmp)(s1, s1), RightOOBReadMessage(0));
+  EXPECT_DEATH(Ident(StrCmp)(s1 + size - 1, s2), RightOOBReadMessage(0));
   free(s1);
   free(s2);
 }
@@ -1246,13 +1360,13 @@ void RunStrNCmpTest(PointerToStrNCmp StrNCmp) {
   Ident(StrNCmp(s1 - 1, s2 - 1, 0));
   Ident(StrNCmp(s1 + size - 1, s2 + size - 1, 1));
   // One of arguments points to not allocated memory.
-  EXPECT_DEATH(Ident(StrNCmp)(s1 - 1, s2, 1), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(StrNCmp)(s1, s2 - 1, 1), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(StrNCmp)(s1 + size, s2, 1), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Ident(StrNCmp)(s1, s2 + size, 1), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(StrNCmp)(s1 - 1, s2, 1), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(StrNCmp)(s1, s2 - 1, 1), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(StrNCmp)(s1 + size, s2, 1), RightOOBReadMessage(0));
+  EXPECT_DEATH(Ident(StrNCmp)(s1, s2 + size, 1), RightOOBReadMessage(0));
   // Hit unallocated memory and die.
-  EXPECT_DEATH(Ident(StrNCmp)(s1 + 1, s2 + 1, size), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Ident(StrNCmp)(s1 + size - 1, s2, 2), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(StrNCmp)(s1 + 1, s2 + 1, size), RightOOBReadMessage(0));
+  EXPECT_DEATH(Ident(StrNCmp)(s1 + size - 1, s2, 2), RightOOBReadMessage(0));
   free(s1);
   free(s2);
 }
@@ -1274,22 +1388,23 @@ TEST(AddressSanitizer, MemCmpOOBTest) {
   Ident(memcmp(s1 + size - 1, s2 + size - 1, 1));
   Ident(memcmp(s1 - 1, s2 - 1, 0));
   // One of arguments points to not allocated memory.
-  EXPECT_DEATH(Ident(memcmp)(s1 - 1, s2, 1), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(memcmp)(s1, s2 - 1, 1), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(Ident(memcmp)(s1 + size, s2, 1), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Ident(memcmp)(s1, s2 + size, 1), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(memcmp)(s1 - 1, s2, 1), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(memcmp)(s1, s2 - 1, 1), LeftOOBReadMessage(1));
+  EXPECT_DEATH(Ident(memcmp)(s1 + size, s2, 1), RightOOBReadMessage(0));
+  EXPECT_DEATH(Ident(memcmp)(s1, s2 + size, 1), RightOOBReadMessage(0));
   // Hit unallocated memory and die.
-  EXPECT_DEATH(Ident(memcmp)(s1 + 1, s2 + 1, size), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Ident(memcmp)(s1 + size - 1, s2, 2), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(memcmp)(s1 + 1, s2 + 1, size), RightOOBReadMessage(0));
+  EXPECT_DEATH(Ident(memcmp)(s1 + size - 1, s2, 2), RightOOBReadMessage(0));
   // Zero bytes are not terminators and don't prevent from OOB.
   s1[size - 1] = '\0';
   s2[size - 1] = '\0';
-  EXPECT_DEATH(Ident(memcmp)(s1, s2, size + 1), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Ident(memcmp)(s1, s2, size + 1), RightOOBReadMessage(0));
   free(s1);
   free(s2);
 }
 
 TEST(AddressSanitizer, StrCatOOBTest) {
+  // strcat() reads strlen(to) bytes from |to| before concatenating.
   size_t to_size = Ident(100);
   char *to = MallocAndMemsetString(to_size);
   to[0] = '\0';
@@ -1302,23 +1417,23 @@ TEST(AddressSanitizer, StrCatOOBTest) {
   strcat(to + from_size, from + from_size - 2);
   // Passing an invalid pointer is an error even when concatenating an empty
   // string.
-  EXPECT_DEATH(strcat(to - 1, from + from_size - 1), LeftOOBErrorMessage(1));
+  EXPECT_DEATH(strcat(to - 1, from + from_size - 1), LeftOOBAccessMessage(1));
   // One of arguments points to not allocated memory.
-  EXPECT_DEATH(strcat(to - 1, from), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(strcat(to, from - 1), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(strcat(to + to_size, from), RightOOBErrorMessage(0));
-  EXPECT_DEATH(strcat(to, from + from_size), RightOOBErrorMessage(0));
+  EXPECT_DEATH(strcat(to - 1, from), LeftOOBAccessMessage(1));
+  EXPECT_DEATH(strcat(to, from - 1), LeftOOBReadMessage(1));
+  EXPECT_DEATH(strcat(to + to_size, from), RightOOBWriteMessage(0));
+  EXPECT_DEATH(strcat(to, from + from_size), RightOOBReadMessage(0));
 
   // "from" is not zero-terminated.
   from[from_size - 1] = 'z';
-  EXPECT_DEATH(strcat(to, from), RightOOBErrorMessage(0));
+  EXPECT_DEATH(strcat(to, from), RightOOBReadMessage(0));
   from[from_size - 1] = '\0';
   // "to" is not zero-terminated.
   memset(to, 'z', to_size);
-  EXPECT_DEATH(strcat(to, from), RightOOBErrorMessage(0));
+  EXPECT_DEATH(strcat(to, from), RightOOBWriteMessage(0));
   // "to" is too short to fit "from".
   to[to_size - from_size + 1] = '\0';
-  EXPECT_DEATH(strcat(to, from), RightOOBErrorMessage(0));
+  EXPECT_DEATH(strcat(to, from), RightOOBWriteMessage(0));
   // length of "to" is just enough.
   strcat(to, from + 1);
 
@@ -1327,6 +1442,7 @@ TEST(AddressSanitizer, StrCatOOBTest) {
 }
 
 TEST(AddressSanitizer, StrNCatOOBTest) {
+  // strncat() reads strlen(to) bytes from |to| before concatenating.
   size_t to_size = Ident(100);
   char *to = MallocAndMemsetString(to_size);
   to[0] = '\0';
@@ -1338,25 +1454,25 @@ TEST(AddressSanitizer, StrNCatOOBTest) {
   from[from_size - 1] = '\0';
   strncat(to, from, 2 * from_size);
   // Catenating empty string with an invalid string is still an error.
-  EXPECT_DEATH(strncat(to - 1, from, 0), LeftOOBErrorMessage(1));
+  EXPECT_DEATH(strncat(to - 1, from, 0), LeftOOBAccessMessage(1));
   strncat(to, from + from_size - 1, 10);
   // One of arguments points to not allocated memory.
-  EXPECT_DEATH(strncat(to - 1, from, 2), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(strncat(to, from - 1, 2), LeftOOBErrorMessage(1));
-  EXPECT_DEATH(strncat(to + to_size, from, 2), RightOOBErrorMessage(0));
-  EXPECT_DEATH(strncat(to, from + from_size, 2), RightOOBErrorMessage(0));
+  EXPECT_DEATH(strncat(to - 1, from, 2), LeftOOBAccessMessage(1));
+  EXPECT_DEATH(strncat(to, from - 1, 2), LeftOOBReadMessage(1));
+  EXPECT_DEATH(strncat(to + to_size, from, 2), RightOOBWriteMessage(0));
+  EXPECT_DEATH(strncat(to, from + from_size, 2), RightOOBReadMessage(0));
 
   memset(from, 'z', from_size);
   memset(to, 'z', to_size);
   to[0] = '\0';
   // "from" is too short.
-  EXPECT_DEATH(strncat(to, from, from_size + 1), RightOOBErrorMessage(0));
+  EXPECT_DEATH(strncat(to, from, from_size + 1), RightOOBReadMessage(0));
   // "to" is not zero-terminated.
-  EXPECT_DEATH(strncat(to + 1, from, 1), RightOOBErrorMessage(0));
+  EXPECT_DEATH(strncat(to + 1, from, 1), RightOOBWriteMessage(0));
   // "to" is too short to fit "from".
   to[0] = 'z';
   to[to_size - from_size + 1] = '\0';
-  EXPECT_DEATH(strncat(to, from, from_size - 1), RightOOBErrorMessage(0));
+  EXPECT_DEATH(strncat(to, from, from_size - 1), RightOOBWriteMessage(0));
   // "to" is just enough.
   strncat(to, from, from_size - 2);
 
@@ -1447,10 +1563,10 @@ typedef void(*PointerToCallAtoi)(const char*);
 void RunAtoiOOBTest(PointerToCallAtoi Atoi) {
   char *array = MallocAndMemsetString(10, '1');
   // Invalid pointer to the string.
-  EXPECT_DEATH(Atoi(array + 11), RightOOBErrorMessage(1));
-  EXPECT_DEATH(Atoi(array - 1), LeftOOBErrorMessage(1));
+  EXPECT_DEATH(Atoi(array + 11), RightOOBReadMessage(1));
+  EXPECT_DEATH(Atoi(array - 1), LeftOOBReadMessage(1));
   // Die if a buffer doesn't have terminating NULL.
-  EXPECT_DEATH(Atoi(array), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0));
   // Make last symbol a terminating NULL or other non-digit.
   array[9] = '\0';
   Atoi(array);
@@ -1459,13 +1575,13 @@ void RunAtoiOOBTest(PointerToCallAtoi Atoi) {
   Atoi(array + 9);
   // Sometimes we need to detect overflow if no digits are found.
   memset(array, ' ', 10);
-  EXPECT_DEATH(Atoi(array), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0));
   array[9] = '-';
-  EXPECT_DEATH(Atoi(array), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Atoi(array + 9), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0));
+  EXPECT_DEATH(Atoi(array + 9), RightOOBReadMessage(0));
   array[8] = '-';
   Atoi(array);
-  delete array;
+  free(array);
 }
 
 TEST(AddressSanitizer, AtoiAndFriendsOOBTest) {
@@ -1489,16 +1605,16 @@ void RunStrtolOOBTest(PointerToCallStrtol Strtol) {
   array[1] = '2';
   array[2] = '3';
   // Invalid pointer to the string.
-  EXPECT_DEATH(Strtol(array + 3, NULL, 0), RightOOBErrorMessage(0));
-  EXPECT_DEATH(Strtol(array - 1, NULL, 0), LeftOOBErrorMessage(1));
+  EXPECT_DEATH(Strtol(array + 3, NULL, 0), RightOOBReadMessage(0));
+  EXPECT_DEATH(Strtol(array - 1, NULL, 0), LeftOOBReadMessage(1));
   // Buffer overflow if there is no terminating null (depends on base).
   Strtol(array, &endptr, 3);
   EXPECT_EQ(array + 2, endptr);
-  EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0));
   array[2] = 'z';
   Strtol(array, &endptr, 35);
   EXPECT_EQ(array + 2, endptr);
-  EXPECT_DEATH(Strtol(array, NULL, 36), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Strtol(array, NULL, 36), RightOOBReadMessage(0));
   // Add terminating zero to get rid of overflow.
   array[2] = '\0';
   Strtol(array, NULL, 36);
@@ -1507,11 +1623,11 @@ void RunStrtolOOBTest(PointerToCallStrtol Strtol) {
   Strtol(array + 3, NULL, 1);
   // Sometimes we need to detect overflow if no digits are found.
   array[0] = array[1] = array[2] = ' ';
-  EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0));
   array[2] = '+';
-  EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0));
   array[2] = '-';
-  EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBErrorMessage(0));
+  EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0));
   array[1] = '+';
   Strtol(array, NULL, 0);
   array[1] = array[2] = 'z';
@@ -1519,7 +1635,7 @@ void RunStrtolOOBTest(PointerToCallStrtol Strtol) {
   EXPECT_EQ(array, endptr);
   Strtol(array + 2, NULL, 0);
   EXPECT_EQ(array, endptr);
-  delete array;
+  free(array);
 }
 
 TEST(AddressSanitizer, StrtollOOBTest) {
@@ -1538,7 +1654,7 @@ typedef void*(*PointerToMemSet)(void*, int, size_t);
 void CallMemSetByPointer(PointerToMemSet MemSet) {
   size_t size = Ident(100);
   char *array = Ident((char*)malloc(size));
-  EXPECT_DEATH(MemSet(array, 0, 101), RightOOBErrorMessage(0));
+  EXPECT_DEATH(MemSet(array, 0, 101), RightOOBWriteMessage(0));
   free(array);
 }
 
@@ -1546,7 +1662,7 @@ void CallMemTransferByPointer(PointerToMemTransfer MemTransfer) {
   size_t size = Ident(100);
   char *src = Ident((char*)malloc(size));
   char *dst = Ident((char*)malloc(size));
-  EXPECT_DEATH(MemTransfer(dst, src, 101), RightOOBErrorMessage(0));
+  EXPECT_DEATH(MemTransfer(dst, src, 101), RightOOBWriteMessage(0));
   free(src);
   free(dst);
 }
@@ -1557,12 +1673,51 @@ TEST(AddressSanitizer, DISABLED_MemIntrinsicCallByPointerTest) {
   CallMemTransferByPointer(&memmove);
 }
 
+#if defined(__linux__) && !defined(ANDROID) && !defined(__ANDROID__)
+TEST(AddressSanitizer, pread) {
+  char *x = new char[10];
+  int fd = open("/proc/self/stat", O_RDONLY);
+  ASSERT_GT(fd, 0);
+  EXPECT_DEATH(pread(fd, x, 15, 0),
+               ASAN_PCRE_DOTALL
+               "AddressSanitizer: heap-buffer-overflow"
+               ".* is located 0 bytes to the right of 10-byte region");
+  close(fd);
+  delete [] x;
+}
+
+TEST(AddressSanitizer, pread64) {
+  char *x = new char[10];
+  int fd = open("/proc/self/stat", O_RDONLY);
+  ASSERT_GT(fd, 0);
+  EXPECT_DEATH(pread64(fd, x, 15, 0),
+               ASAN_PCRE_DOTALL
+               "AddressSanitizer: heap-buffer-overflow"
+               ".* is located 0 bytes to the right of 10-byte region");
+  close(fd);
+  delete [] x;
+}
+
+TEST(AddressSanitizer, read) {
+  char *x = new char[10];
+  int fd = open("/proc/self/stat", O_RDONLY);
+  ASSERT_GT(fd, 0);
+  EXPECT_DEATH(read(fd, x, 15),
+               ASAN_PCRE_DOTALL
+               "AddressSanitizer: heap-buffer-overflow"
+               ".* is located 0 bytes to the right of 10-byte region");
+  close(fd);
+  delete [] x;
+}
+
+#endif // defined(__linux__) && !defined(ANDROID) && !defined(__ANDROID__)
+
 // This test case fails
 // Clang optimizes memcpy/memset calls which lead to unaligned access
 TEST(AddressSanitizer, DISABLED_MemIntrinsicUnalignedAccessTest) {
   int size = Ident(4096);
   char *s = Ident((char*)malloc(size));
-  EXPECT_DEATH(memset(s + size - 1, 0, 2), RightOOBErrorMessage(0));
+  EXPECT_DEATH(memset(s + size - 1, 0, 2), RightOOBWriteMessage(0));
   free(s);
 }
 
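
The three new Linux-only tests exercise the interceptors for read()/pread()/pread64(): each asks the kernel for 15 bytes into a 10-byte heap buffer, and the interceptor has to notice that the destination range overruns the allocation. The same failure reproduces standalone (a sketch, assuming an ASan-instrumented build, e.g. compiled with -fsanitize=address):

    #include <fcntl.h>
    #include <unistd.h>

    int main() {
      char *x = new char[10];
      int fd = open("/proc/self/stat", O_RDONLY);
      // ASan checks the destination range [x, x+15) against the 10-byte
      // allocation and dies with "heap-buffer-overflow ... 0 bytes to the
      // right of 10-byte region".
      read(fd, x, 15);
      return 0;
    }
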
@@ -1617,19 +1772,30 @@ TEST(AddressSanitizer, DISABLED_MallocFreeUnwindAndSymbolizeTest) {
                "malloc_fff.*malloc_eee.*malloc_ddd");
 }
 
+static bool TryToSetThreadName(const char *name) {
+#if defined(__linux__) && defined(PR_SET_NAME)
+  return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);
+#else
+  return false;
+#endif
+}
+
 void *ThreadedTestAlloc(void *a) {
+  EXPECT_EQ(true, TryToSetThreadName("AllocThr"));
   int **p = (int**)a;
   *p = new int;
   return 0;
 }
 
 void *ThreadedTestFree(void *a) {
+  EXPECT_EQ(true, TryToSetThreadName("FreeThr"));
   int **p = (int**)a;
   delete *p;
   return 0;
 }
 
 void *ThreadedTestUse(void *a) {
+  EXPECT_EQ(true, TryToSetThreadName("UseThr"));
   int **p = (int**)a;
   **p = 1;
   return 0;
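
TryToSetThreadName relies on prctl(PR_SET_NAME), which names the calling thread and truncates the name to 15 characters plus the terminating NUL. A standalone sketch of the call (PR_GET_NAME is used here only to read the name back):

    #include <sys/prctl.h>
    #include <stdio.h>

    int main() {
      if (prctl(PR_SET_NAME, (unsigned long)"AllocThr", 0, 0, 0) == 0) {
        char name[16] = {};
        prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
        printf("current thread is now '%s'\n", name);  // -> AllocThr
      }
      return 0;
    }
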
@@ -1654,6 +1820,30 @@ TEST(AddressSanitizer, ThreadedTest) {
                ".*Thread T.*created");
 }
 
+void *ThreadedTestFunc(void *unused) {
+  // Check if prctl(PR_SET_NAME) is supported. Return if not.
+  if (!TryToSetThreadName("TestFunc"))
+    return 0;
+  EXPECT_DEATH(ThreadedTestSpawn(),
+               ASAN_PCRE_DOTALL
+               "WRITE .*thread T. .UseThr."
+               ".*freed by thread T. .FreeThr. here:"
+               ".*previously allocated by thread T. .AllocThr. here:"
+               ".*Thread T. .UseThr. created by T.*TestFunc"
+               ".*Thread T. .FreeThr. created by T"
+               ".*Thread T. .AllocThr. created by T"
+               "");
+  return 0;
+}
+
+TEST(AddressSanitizer, ThreadNamesTest) {
+  // Run ThreadedTestFunc in a separate thread because it tries to set a
+  // thread name and we don't want to change the main thread's name.
+  pthread_t t;
+  PTHREAD_CREATE(&t, 0, ThreadedTestFunc, 0);
+  PTHREAD_JOIN(t, 0);
+}
+
 #if ASAN_NEEDS_SEGV
 TEST(AddressSanitizer, ShadowGapTest) {
 #if SANITIZER_WORDSIZE == 32
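
ThreadNamesTest drives the whole use-after-free scenario from a helper thread so the main thread keeps its own name, then checks that every thread in the resulting report is printed with the name set via prctl. PTHREAD_CREATE and PTHREAD_JOIN are the test suite's macros; assuming they are thin wrappers over the pthreads calls, the plain equivalent is:

    #include <pthread.h>

    pthread_t t;
    pthread_create(&t, 0, ThreadedTestFunc, 0);  // name checks happen inside
    pthread_join(t, 0);
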
@@ -1868,6 +2058,27 @@ TEST(AddressSanitizer, AttributeNoAddressSafetyTest) {
   Ident(NoAddressSafety)();
 }
 
+static string MismatchStr(const string &str) {
+  return string("AddressSanitizer: alloc-dealloc-mismatch \\(") + str;
+}
+
+// This test is disabled until we enable alloc_dealloc_mismatch by default.
+// The feature is also tested by lit tests.
+TEST(AddressSanitizer, DISABLED_AllocDeallocMismatch) {
+  EXPECT_DEATH(free(Ident(new int)),
+               MismatchStr("operator new vs free"));
+  EXPECT_DEATH(free(Ident(new int[2])),
+               MismatchStr("operator new \\[\\] vs free"));
+  EXPECT_DEATH(delete (Ident(new int[2])),
+               MismatchStr("operator new \\[\\] vs operator delete"));
+  EXPECT_DEATH(delete (Ident((int*)malloc(2 * sizeof(int)))),
+               MismatchStr("malloc vs operator delete"));
+  EXPECT_DEATH(delete [] (Ident(new int)),
+               MismatchStr("operator new vs operator delete \\[\\]"));
+  EXPECT_DEATH(delete [] (Ident((int*)malloc(2 * sizeof(int)))),
+               MismatchStr("malloc vs operator delete \\[\\]"));
+}
+
 // ------------------ demo tests; run each one-by-one -------------
 // e.g. --gtest_filter=*DemoOOBLeftHigh --gtest_also_run_disabled_tests
 TEST(AddressSanitizer, DISABLED_DemoThreadedTest) {
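
MismatchStr escapes the opening parenthesis because EXPECT_DEATH treats its second argument as a regex. Each case pairs an allocation with the wrong deallocation; the first mismatch, as a standalone program (a sketch -- the report text comes from the strings in the test above, and the check only fires once alloc_dealloc_mismatch is enabled):

    #include <stdlib.h>

    int main() {
      int *p = new int[2];
      free(p);  // AddressSanitizer: alloc-dealloc-mismatch
                // (operator new [] vs free)
      return 0;
    }
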
@@ -2033,53 +2244,56 @@ TEST(AddressSanitizerMac, DISABLED_CFAllocatorMallocZoneDoubleFree) {
   EXPECT_DEATH(CFAllocatorMallocZoneDoubleFree(), "attempting double-free");
 }
 
+// For libdispatch tests below we check that ASan got to the shadow byte
+// legend, i.e. managed to print the thread stacks (this almost certainly
+// means that the libdispatch task creation has been intercepted correctly).
 TEST(AddressSanitizerMac, GCDDispatchAsync) {
   // Make sure the whole ASan report is printed, i.e. that we don't die
   // on a CHECK.
-  EXPECT_DEATH(TestGCDDispatchAsync(), "Shadow byte and word");
+  EXPECT_DEATH(TestGCDDispatchAsync(), "Shadow byte legend");
 }
 
 TEST(AddressSanitizerMac, GCDDispatchSync) {
   // Make sure the whole ASan report is printed, i.e. that we don't die
   // on a CHECK.
-  EXPECT_DEATH(TestGCDDispatchSync(), "Shadow byte and word");
+  EXPECT_DEATH(TestGCDDispatchSync(), "Shadow byte legend");
 }
 
 
 TEST(AddressSanitizerMac, GCDReuseWqthreadsAsync) {
   // Make sure the whole ASan report is printed, i.e. that we don't die
   // on a CHECK.
-  EXPECT_DEATH(TestGCDReuseWqthreadsAsync(), "Shadow byte and word");
+  EXPECT_DEATH(TestGCDReuseWqthreadsAsync(), "Shadow byte legend");
 }
 
 TEST(AddressSanitizerMac, GCDReuseWqthreadsSync) {
   // Make sure the whole ASan report is printed, i.e. that we don't die
   // on a CHECK.
-  EXPECT_DEATH(TestGCDReuseWqthreadsSync(), "Shadow byte and word");
+  EXPECT_DEATH(TestGCDReuseWqthreadsSync(), "Shadow byte legend");
 }
 
 TEST(AddressSanitizerMac, GCDDispatchAfter) {
   // Make sure the whole ASan report is printed, i.e. that we don't die
   // on a CHECK.
-  EXPECT_DEATH(TestGCDDispatchAfter(), "Shadow byte and word");
+  EXPECT_DEATH(TestGCDDispatchAfter(), "Shadow byte legend");
 }
 
 TEST(AddressSanitizerMac, GCDSourceEvent) {
   // Make sure the whole ASan report is printed, i.e. that we don't die
   // on a CHECK.
-  EXPECT_DEATH(TestGCDSourceEvent(), "Shadow byte and word");
+  EXPECT_DEATH(TestGCDSourceEvent(), "Shadow byte legend");
 }
 
 TEST(AddressSanitizerMac, GCDSourceCancel) {
   // Make sure the whole ASan report is printed, i.e. that we don't die
   // on a CHECK.
-  EXPECT_DEATH(TestGCDSourceCancel(), "Shadow byte and word");
+  EXPECT_DEATH(TestGCDSourceCancel(), "Shadow byte legend");
 }
 
 TEST(AddressSanitizerMac, GCDGroupAsync) {
   // Make sure the whole ASan report is printed, i.e. that we don't die
   // on a CHECK.
-  EXPECT_DEATH(TestGCDGroupAsync(), "Shadow byte and word");
+  EXPECT_DEATH(TestGCDGroupAsync(), "Shadow byte legend");
 }
 
 void *MallocIntrospectionLockWorker(void *_) {
@@ -2172,7 +2386,7 @@ TEST(AddressSanitizerMac, NSURLDeallocation) {
 TEST(AddressSanitizerMac, Mstats) {
   malloc_statistics_t stats1, stats2;
   malloc_zone_statistics(/*all zones*/NULL, &stats1);
-  const int kMallocSize = 100000;
+  const size_t kMallocSize = 100000;
   void *alloc = Ident(malloc(kMallocSize));
   malloc_zone_statistics(/*all zones*/NULL, &stats2);
   EXPECT_GT(stats2.blocks_in_use, stats1.blocks_in_use);
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,3 +1,13 @@
+2013-01-10  Kostya Serebryany  <kcc@google.com>
+
+        * All source files: Merge from upstream r171973.
+        * sanitizer_common/Makefile.am: Added new files.
+        * asan/Makefile.am: Likewise.
+        * tsan/Makefile.am: Likewise.
+        * sanitizer_common/Makefile.in: Regenerated.
+        * asan/Makefile.in: Likewise.
+        * tsan/Makefile.in: Likewise.
+
 2013-01-07  H.J. Lu  <hongjiu.lu@intel.com>
 
         * asan/Makefile.am (libasan_la_LIBADD): Replace
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-169392
+171973
 
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
--- a/libsanitizer/asan/Makefile.am
+++ b/libsanitizer/asan/Makefile.am
@@ -15,6 +15,7 @@ toolexeclib_LTLIBRARIES = libasan.la
 
 asan_files = \
         asan_allocator.cc \
+        asan_allocator2.cc \
         asan_interceptors.cc \
         asan_mac.cc \
         asan_malloc_mac.cc \
@@ -23,6 +24,7 @@ asan_files = \
         asan_rtl.cc \
         asan_stats.cc \
         asan_thread_registry.cc \
+        asan_fake_stack.cc \
         asan_globals.cc \
         asan_linux.cc \
         asan_malloc_linux.cc \
--- a/libsanitizer/asan/Makefile.in
+++ b/libsanitizer/asan/Makefile.in
@@ -84,19 +84,20 @@ am__DEPENDENCIES_1 =
 @USING_MAC_INTERPOSE_FALSE@ $(am__DEPENDENCIES_1)
 @USING_MAC_INTERPOSE_TRUE@libasan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la \
 @USING_MAC_INTERPOSE_TRUE@ $(am__DEPENDENCIES_1)
-am__libasan_la_SOURCES_DIST = asan_allocator.cc asan_interceptors.cc \
-        asan_mac.cc asan_malloc_mac.cc asan_new_delete.cc \
-        asan_posix.cc asan_rtl.cc asan_stats.cc \
-        asan_thread_registry.cc asan_globals.cc asan_linux.cc \
-        asan_malloc_linux.cc asan_malloc_win.cc asan_poisoning.cc \
-        asan_report.cc asan_stack.cc asan_thread.cc asan_win.cc \
-        dynamic/asan_interceptors_dynamic.cc
-am__objects_1 = asan_allocator.lo asan_interceptors.lo asan_mac.lo \
-        asan_malloc_mac.lo asan_new_delete.lo asan_posix.lo \
-        asan_rtl.lo asan_stats.lo asan_thread_registry.lo \
-        asan_globals.lo asan_linux.lo asan_malloc_linux.lo \
-        asan_malloc_win.lo asan_poisoning.lo asan_report.lo \
-        asan_stack.lo asan_thread.lo asan_win.lo
+am__libasan_la_SOURCES_DIST = asan_allocator.cc asan_allocator2.cc \
+        asan_interceptors.cc asan_mac.cc asan_malloc_mac.cc \
+        asan_new_delete.cc asan_posix.cc asan_rtl.cc asan_stats.cc \
+        asan_thread_registry.cc asan_fake_stack.cc asan_globals.cc \
+        asan_linux.cc asan_malloc_linux.cc asan_malloc_win.cc \
+        asan_poisoning.cc asan_report.cc asan_stack.cc asan_thread.cc \
+        asan_win.cc dynamic/asan_interceptors_dynamic.cc
+am__objects_1 = asan_allocator.lo asan_allocator2.lo \
+        asan_interceptors.lo asan_mac.lo asan_malloc_mac.lo \
+        asan_new_delete.lo asan_posix.lo asan_rtl.lo asan_stats.lo \
+        asan_thread_registry.lo asan_fake_stack.lo asan_globals.lo \
+        asan_linux.lo asan_malloc_linux.lo asan_malloc_win.lo \
+        asan_poisoning.lo asan_report.lo asan_stack.lo asan_thread.lo \
+        asan_win.lo
 @USING_MAC_INTERPOSE_TRUE@am__objects_2 = \
 @USING_MAC_INTERPOSE_TRUE@      asan_interceptors_dynamic.lo
 am_libasan_la_OBJECTS = $(am__objects_1) $(am__objects_2)
@ -269,6 +270,7 @@ ACLOCAL_AMFLAGS = -I $(top_srcdir) -I $(top_srcdir)/config
|
||||||
toolexeclib_LTLIBRARIES = libasan.la
|
toolexeclib_LTLIBRARIES = libasan.la
|
||||||
asan_files = \
|
asan_files = \
|
||||||
asan_allocator.cc \
|
asan_allocator.cc \
|
||||||
|
asan_allocator2.cc \
|
||||||
asan_interceptors.cc \
|
asan_interceptors.cc \
|
||||||
asan_mac.cc \
|
asan_mac.cc \
|
||||||
asan_malloc_mac.cc \
|
asan_malloc_mac.cc \
|
||||||
|
|
@ -277,6 +279,7 @@ asan_files = \
|
||||||
asan_rtl.cc \
|
asan_rtl.cc \
|
||||||
asan_stats.cc \
|
asan_stats.cc \
|
||||||
asan_thread_registry.cc \
|
asan_thread_registry.cc \
|
||||||
|
asan_fake_stack.cc \
|
||||||
asan_globals.cc \
|
asan_globals.cc \
|
||||||
asan_linux.cc \
|
asan_linux.cc \
|
||||||
asan_malloc_linux.cc \
|
asan_malloc_linux.cc \
|
||||||
|
|
@ -409,6 +412,8 @@ distclean-compile:
|
||||||
-rm -f *.tab.c
|
-rm -f *.tab.c
|
||||||
|
|
||||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
|
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
|
||||||
|
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
|
||||||
|
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
|
||||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
|
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
|
||||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
|
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
|
||||||
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors_dynamic.Plo@am__quote@
|
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors_dynamic.Plo@am__quote@
|
||||||
|
|
|
||||||
|
|
@ -22,8 +22,9 @@
|
||||||
// Once freed, the body of the chunk contains the stack trace of the free call.
|
// Once freed, the body of the chunk contains the stack trace of the free call.
|
||||||
//
|
//
|
||||||
//===----------------------------------------------------------------------===//
|
//===----------------------------------------------------------------------===//
|
||||||
|
|
||||||
#include "asan_allocator.h"
|
#include "asan_allocator.h"
|
||||||
|
|
||||||
|
#if ASAN_ALLOCATOR_VERSION == 1
|
||||||
#include "asan_interceptors.h"
|
#include "asan_interceptors.h"
|
||||||
#include "asan_internal.h"
|
#include "asan_internal.h"
|
||||||
#include "asan_lock.h"
|
#include "asan_lock.h"
|
||||||
|
|
@ -35,10 +36,6 @@
|
||||||
#include "sanitizer/asan_interface.h"
|
#include "sanitizer/asan_interface.h"
|
||||||
#include "sanitizer_common/sanitizer_atomic.h"
|
#include "sanitizer_common/sanitizer_atomic.h"
|
||||||
|
|
||||||
#if defined(_WIN32) && !defined(__clang__)
|
|
||||||
#include <intrin.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
namespace __asan {
|
namespace __asan {
|
||||||
|
|
||||||
#define REDZONE ((uptr)(flags()->redzone))
|
#define REDZONE ((uptr)(flags()->redzone))
|
||||||
|
|
@ -58,42 +55,6 @@ static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;
|
||||||
static const uptr kMaxAllowedMallocSize =
|
static const uptr kMaxAllowedMallocSize =
|
||||||
(SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
|
(SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
|
||||||
|
|
||||||
static inline bool IsAligned(uptr a, uptr alignment) {
|
|
||||||
return (a & (alignment - 1)) == 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline uptr Log2(uptr x) {
|
|
||||||
CHECK(IsPowerOfTwo(x));
|
|
||||||
#if !defined(_WIN32) || defined(__clang__)
|
|
||||||
return __builtin_ctzl(x);
|
|
||||||
#elif defined(_WIN64)
|
|
||||||
unsigned long ret; // NOLINT
|
|
||||||
_BitScanForward64(&ret, x);
|
|
||||||
return ret;
|
|
||||||
#else
|
|
||||||
unsigned long ret; // NOLINT
|
|
||||||
_BitScanForward(&ret, x);
|
|
||||||
return ret;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline uptr RoundUpToPowerOfTwo(uptr size) {
|
|
||||||
CHECK(size);
|
|
||||||
if (IsPowerOfTwo(size)) return size;
|
|
||||||
|
|
||||||
unsigned long up; // NOLINT
|
|
||||||
#if !defined(_WIN32) || defined(__clang__)
|
|
||||||
up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
|
|
||||||
#elif defined(_WIN64)
|
|
||||||
_BitScanReverse64(&up, size);
|
|
||||||
#else
|
|
||||||
_BitScanReverse(&up, size);
|
|
||||||
#endif
|
|
||||||
CHECK(size < (1ULL << (up + 1)));
|
|
||||||
CHECK(size > (1ULL << up));
|
|
||||||
return 1UL << (up + 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline uptr SizeClassToSize(u8 size_class) {
|
static inline uptr SizeClassToSize(u8 size_class) {
|
||||||
CHECK(size_class < kNumberOfSizeClasses);
|
CHECK(size_class < kNumberOfSizeClasses);
|
||||||
if (size_class <= kMallocSizeClassStepLog) {
|
if (size_class <= kMallocSizeClassStepLog) {
|
||||||
|
|
@ -165,7 +126,8 @@ struct ChunkBase {
|
||||||
|
|
||||||
// Second 8 bytes.
|
// Second 8 bytes.
|
||||||
uptr alignment_log : 8;
|
uptr alignment_log : 8;
|
||||||
uptr used_size : FIRST_32_SECOND_64(32, 56); // Size requested by the user.
|
uptr alloc_type : 2;
|
||||||
|
uptr used_size : FIRST_32_SECOND_64(32, 54); // Size requested by the user.
|
||||||
|
|
||||||
// This field may overlap with the user area and thus should not
|
// This field may overlap with the user area and thus should not
|
||||||
// be used while the chunk is in CHUNK_ALLOCATED state.
|
// be used while the chunk is in CHUNK_ALLOCATED state.
|
||||||
|
|
@ -215,33 +177,6 @@ void AsanChunkView::GetFreeStack(StackTrace *stack) {
|
||||||
chunk_->compressed_free_stack_size());
|
chunk_->compressed_free_stack_size());
|
||||||
}
|
}
|
||||||
|
|
||||||
bool AsanChunkView::AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
|
|
||||||
if (addr >= Beg() && (addr + access_size) <= End()) {
|
|
||||||
*offset = addr - Beg();
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool AsanChunkView::AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
|
|
||||||
if (addr < Beg()) {
|
|
||||||
*offset = Beg() - addr;
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool AsanChunkView::AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
|
|
||||||
if (addr + access_size >= End()) {
|
|
||||||
if (addr <= End())
|
|
||||||
*offset = 0;
|
|
||||||
else
|
|
||||||
*offset = addr - End();
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
static AsanChunk *PtrToChunk(uptr ptr) {
|
static AsanChunk *PtrToChunk(uptr ptr) {
|
||||||
AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
|
AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
|
||||||
if (m->chunk_state == CHUNK_MEMALIGN) {
|
if (m->chunk_state == CHUNK_MEMALIGN) {
|
||||||
|
|
@ -252,34 +187,13 @@ static AsanChunk *PtrToChunk(uptr ptr) {
|
||||||
|
|
||||||
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
|
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
|
||||||
CHECK(q->size() > 0);
|
CHECK(q->size() > 0);
|
||||||
if (last_) {
|
|
||||||
CHECK(first_);
|
|
||||||
CHECK(!last_->next);
|
|
||||||
last_->next = q->first_;
|
|
||||||
last_ = q->last_;
|
|
||||||
} else {
|
|
||||||
CHECK(!first_);
|
|
||||||
last_ = q->last_;
|
|
||||||
first_ = q->first_;
|
|
||||||
CHECK(first_);
|
|
||||||
}
|
|
||||||
CHECK(last_);
|
|
||||||
CHECK(!last_->next);
|
|
||||||
size_ += q->size();
|
size_ += q->size();
|
||||||
|
append_back(q);
|
||||||
q->clear();
|
q->clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
void AsanChunkFifoList::Push(AsanChunk *n) {
|
void AsanChunkFifoList::Push(AsanChunk *n) {
|
||||||
CHECK(n->next == 0);
|
push_back(n);
|
||||||
if (last_) {
|
|
||||||
CHECK(first_);
|
|
||||||
CHECK(!last_->next);
|
|
||||||
last_->next = n;
|
|
||||||
last_ = n;
|
|
||||||
} else {
|
|
||||||
CHECK(!first_);
|
|
||||||
last_ = first_ = n;
|
|
||||||
}
|
|
||||||
size_ += n->Size();
|
size_ += n->Size();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -288,15 +202,9 @@ void AsanChunkFifoList::Push(AsanChunk *n) {
|
||||||
// ago. Not sure if we can or want to do anything with this.
|
// ago. Not sure if we can or want to do anything with this.
|
||||||
AsanChunk *AsanChunkFifoList::Pop() {
|
AsanChunk *AsanChunkFifoList::Pop() {
|
||||||
CHECK(first_);
|
CHECK(first_);
|
||||||
AsanChunk *res = first_;
|
AsanChunk *res = front();
|
||||||
first_ = first_->next;
|
|
||||||
if (first_ == 0)
|
|
||||||
last_ = 0;
|
|
||||||
CHECK(size_ >= res->Size());
|
|
||||||
size_ -= res->Size();
|
size_ -= res->Size();
|
||||||
if (last_) {
|
pop_front();
|
||||||
CHECK(!last_->next);
|
|
||||||
}
|
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -588,7 +496,8 @@ AsanChunkView FindHeapChunkByAddress(uptr address) {
|
||||||
return AsanChunkView(malloc_info.FindChunkByAddr(address));
|
return AsanChunkView(malloc_info.FindChunkByAddr(address));
|
||||||
}
|
}
|
||||||
|
|
||||||
static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack) {
|
static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack,
|
||||||
|
AllocType alloc_type) {
|
||||||
__asan_init();
|
__asan_init();
|
||||||
CHECK(stack);
|
CHECK(stack);
|
||||||
if (size == 0) {
|
if (size == 0) {
|
||||||
|
|
@ -645,6 +554,7 @@ static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack) {
|
||||||
CHECK(m);
|
CHECK(m);
|
||||||
CHECK(m->chunk_state == CHUNK_AVAILABLE);
|
CHECK(m->chunk_state == CHUNK_AVAILABLE);
|
||||||
m->chunk_state = CHUNK_ALLOCATED;
|
m->chunk_state = CHUNK_ALLOCATED;
|
||||||
|
m->alloc_type = alloc_type;
|
||||||
m->next = 0;
|
m->next = 0;
|
||||||
CHECK(m->Size() == size_to_allocate);
|
CHECK(m->Size() == size_to_allocate);
|
||||||
uptr addr = (uptr)m + REDZONE;
|
uptr addr = (uptr)m + REDZONE;
|
||||||
|
|
@ -679,7 +589,7 @@ static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack) {
|
||||||
return (u8*)addr;
|
return (u8*)addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void Deallocate(u8 *ptr, StackTrace *stack) {
|
static void Deallocate(u8 *ptr, StackTrace *stack, AllocType alloc_type) {
|
||||||
if (!ptr) return;
|
if (!ptr) return;
|
||||||
CHECK(stack);
|
CHECK(stack);
|
||||||
|
|
||||||
|
|
@ -700,6 +610,9 @@ static void Deallocate(u8 *ptr, StackTrace *stack) {
|
||||||
ReportFreeNotMalloced((uptr)ptr, stack);
|
ReportFreeNotMalloced((uptr)ptr, stack);
|
||||||
}
|
}
|
||||||
CHECK(old_chunk_state == CHUNK_ALLOCATED);
|
CHECK(old_chunk_state == CHUNK_ALLOCATED);
|
||||||
|
if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
|
||||||
|
ReportAllocTypeMismatch((uptr)ptr, stack,
|
||||||
|
(AllocType)m->alloc_type, (AllocType)alloc_type);
|
||||||
// With REDZONE==16 m->next is in the user area, otherwise it should be 0.
|
// With REDZONE==16 m->next is in the user area, otherwise it should be 0.
|
||||||
CHECK(REDZONE <= 16 || !m->next);
|
CHECK(REDZONE <= 16 || !m->next);
|
||||||
CHECK(m->free_tid == kInvalidTid);
|
CHECK(m->free_tid == kInvalidTid);
|
||||||
|
|
@ -744,18 +657,19 @@ static u8 *Reallocate(u8 *old_ptr, uptr new_size,
|
||||||
CHECK(m->chunk_state == CHUNK_ALLOCATED);
|
CHECK(m->chunk_state == CHUNK_ALLOCATED);
|
||||||
uptr old_size = m->used_size;
|
uptr old_size = m->used_size;
|
||||||
uptr memcpy_size = Min(new_size, old_size);
|
uptr memcpy_size = Min(new_size, old_size);
|
||||||
u8 *new_ptr = Allocate(0, new_size, stack);
|
u8 *new_ptr = Allocate(0, new_size, stack, FROM_MALLOC);
|
||||||
if (new_ptr) {
|
if (new_ptr) {
|
||||||
CHECK(REAL(memcpy) != 0);
|
CHECK(REAL(memcpy) != 0);
|
||||||
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
|
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
|
||||||
Deallocate(old_ptr, stack);
|
Deallocate(old_ptr, stack, FROM_MALLOC);
|
||||||
}
|
}
|
||||||
return new_ptr;
|
return new_ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace __asan
|
} // namespace __asan
|
||||||
|
|
||||||
// Default (no-op) implementation of malloc hooks.
|
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
|
||||||
|
// Provide default (no-op) implementation of malloc hooks.
|
||||||
extern "C" {
|
extern "C" {
|
||||||
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
|
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
|
||||||
void __asan_malloc_hook(void *ptr, uptr size) {
|
void __asan_malloc_hook(void *ptr, uptr size) {
|
||||||
|
|
@ -767,53 +681,58 @@ void __asan_free_hook(void *ptr) {
|
||||||
(void)ptr;
|
(void)ptr;
|
||||||
}
|
}
|
||||||
} // extern "C"
|
} // extern "C"
|
||||||
|
#endif
|
||||||
|
|
||||||
namespace __asan {
|
namespace __asan {
|
||||||
|
|
||||||
|
void PrintInternalAllocatorStats() {
|
||||||
|
}
|
||||||
|
|
||||||
SANITIZER_INTERFACE_ATTRIBUTE
|
SANITIZER_INTERFACE_ATTRIBUTE
|
||||||
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
|
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
|
||||||
void *ptr = (void*)Allocate(alignment, size, stack);
|
AllocType alloc_type) {
|
||||||
__asan_malloc_hook(ptr, size);
|
void *ptr = (void*)Allocate(alignment, size, stack, alloc_type);
|
||||||
|
ASAN_MALLOC_HOOK(ptr, size);
|
||||||
return ptr;
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
SANITIZER_INTERFACE_ATTRIBUTE
|
SANITIZER_INTERFACE_ATTRIBUTE
|
||||||
void asan_free(void *ptr, StackTrace *stack) {
|
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
|
||||||
__asan_free_hook(ptr);
|
ASAN_FREE_HOOK(ptr);
|
||||||
Deallocate((u8*)ptr, stack);
|
Deallocate((u8*)ptr, stack, alloc_type);
|
||||||
}
|
}
|
||||||
|
|
||||||
SANITIZER_INTERFACE_ATTRIBUTE
|
SANITIZER_INTERFACE_ATTRIBUTE
|
||||||
void *asan_malloc(uptr size, StackTrace *stack) {
|
void *asan_malloc(uptr size, StackTrace *stack) {
|
||||||
void *ptr = (void*)Allocate(0, size, stack);
|
void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
|
||||||
__asan_malloc_hook(ptr, size);
|
ASAN_MALLOC_HOOK(ptr, size);
|
||||||
return ptr;
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
|
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
|
||||||
void *ptr = (void*)Allocate(0, nmemb * size, stack);
|
void *ptr = (void*)Allocate(0, nmemb * size, stack, FROM_MALLOC);
|
||||||
if (ptr)
|
if (ptr)
|
||||||
REAL(memset)(ptr, 0, nmemb * size);
|
REAL(memset)(ptr, 0, nmemb * size);
|
||||||
__asan_malloc_hook(ptr, nmemb * size);
|
ASAN_MALLOC_HOOK(ptr, size);
|
||||||
return ptr;
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
void *asan_realloc(void *p, uptr size, StackTrace *stack) {
|
void *asan_realloc(void *p, uptr size, StackTrace *stack) {
|
||||||
if (p == 0) {
|
if (p == 0) {
|
||||||
void *ptr = (void*)Allocate(0, size, stack);
|
void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC);
|
||||||
__asan_malloc_hook(ptr, size);
|
ASAN_MALLOC_HOOK(ptr, size);
|
||||||
return ptr;
|
return ptr;
|
||||||
} else if (size == 0) {
|
} else if (size == 0) {
|
||||||
__asan_free_hook(p);
|
ASAN_FREE_HOOK(p);
|
||||||
Deallocate((u8*)p, stack);
|
Deallocate((u8*)p, stack, FROM_MALLOC);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
return Reallocate((u8*)p, size, stack);
|
return Reallocate((u8*)p, size, stack);
|
||||||
}
|
}
|
||||||
|
|
||||||
void *asan_valloc(uptr size, StackTrace *stack) {
|
void *asan_valloc(uptr size, StackTrace *stack) {
|
||||||
void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack);
|
void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack, FROM_MALLOC);
|
||||||
__asan_malloc_hook(ptr, size);
|
ASAN_MALLOC_HOOK(ptr, size);
|
||||||
return ptr;
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -824,16 +743,16 @@ void *asan_pvalloc(uptr size, StackTrace *stack) {
|
||||||
// pvalloc(0) should allocate one page.
|
// pvalloc(0) should allocate one page.
|
||||||
size = PageSize;
|
size = PageSize;
|
||||||
}
|
}
|
||||||
void *ptr = (void*)Allocate(PageSize, size, stack);
|
void *ptr = (void*)Allocate(PageSize, size, stack, FROM_MALLOC);
|
||||||
__asan_malloc_hook(ptr, size);
|
ASAN_MALLOC_HOOK(ptr, size);
|
||||||
return ptr;
|
return ptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
|
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
|
||||||
StackTrace *stack) {
|
StackTrace *stack) {
|
||||||
void *ptr = Allocate(alignment, size, stack);
|
void *ptr = Allocate(alignment, size, stack, FROM_MALLOC);
|
||||||
CHECK(IsAligned((uptr)ptr, alignment));
|
CHECK(IsAligned((uptr)ptr, alignment));
|
||||||
__asan_malloc_hook(ptr, size);
|
ASAN_MALLOC_HOOK(ptr, size);
|
||||||
*memptr = ptr;
|
*memptr = ptr;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
@ -860,170 +779,11 @@ void asan_mz_force_unlock() {
|
||||||
malloc_info.ForceUnlock();
|
malloc_info.ForceUnlock();
|
||||||
}
|
}
|
||||||
|
|
||||||
// ---------------------- Fake stack-------------------- {{{1
|
|
||||||
FakeStack::FakeStack() {
|
|
||||||
CHECK(REAL(memset) != 0);
|
|
||||||
REAL(memset)(this, 0, sizeof(*this));
|
|
||||||
}
|
|
||||||
|
|
||||||
bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
|
|
||||||
uptr mem = allocated_size_classes_[size_class];
|
|
||||||
uptr size = ClassMmapSize(size_class);
|
|
||||||
bool res = mem && addr >= mem && addr < mem + size;
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
|
|
||||||
uptr FakeStack::AddrIsInFakeStack(uptr addr) {
|
|
||||||
for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
|
|
||||||
if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
// We may want to compute this during compilation.
|
|
||||||
inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
|
|
||||||
uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
|
|
||||||
uptr log = Log2(rounded_size);
|
|
||||||
CHECK(alloc_size <= (1UL << log));
|
|
||||||
if (!(alloc_size > (1UL << (log-1)))) {
|
|
||||||
Printf("alloc_size %zu log %zu\n", alloc_size, log);
|
|
||||||
}
|
|
||||||
CHECK(alloc_size > (1UL << (log-1)));
|
|
||||||
uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
|
|
||||||
CHECK(res < kNumberOfSizeClasses);
|
|
||||||
CHECK(ClassSize(res) >= rounded_size);
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
|
|
||||||
void FakeFrameFifo::FifoPush(FakeFrame *node) {
|
|
||||||
CHECK(node);
|
|
||||||
node->next = 0;
|
|
||||||
if (first_ == 0 && last_ == 0) {
|
|
||||||
first_ = last_ = node;
|
|
||||||
} else {
|
|
||||||
CHECK(first_);
|
|
||||||
CHECK(last_);
|
|
||||||
last_->next = node;
|
|
||||||
last_ = node;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
FakeFrame *FakeFrameFifo::FifoPop() {
|
|
||||||
CHECK(first_ && last_ && "Exhausted fake stack");
|
|
||||||
FakeFrame *res = 0;
|
|
||||||
if (first_ == last_) {
|
|
||||||
res = first_;
|
|
||||||
first_ = last_ = 0;
|
|
||||||
} else {
|
|
||||||
res = first_;
|
|
||||||
first_ = first_->next;
|
|
||||||
}
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
|
|
||||||
void FakeStack::Init(uptr stack_size) {
|
|
||||||
stack_size_ = stack_size;
|
|
||||||
alive_ = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void FakeStack::Cleanup() {
|
|
||||||
alive_ = false;
|
|
||||||
for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
|
|
||||||
uptr mem = allocated_size_classes_[i];
|
|
||||||
if (mem) {
|
|
||||||
PoisonShadow(mem, ClassMmapSize(i), 0);
|
|
||||||
allocated_size_classes_[i] = 0;
|
|
||||||
UnmapOrDie((void*)mem, ClassMmapSize(i));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
uptr FakeStack::ClassMmapSize(uptr size_class) {
|
|
||||||
return RoundUpToPowerOfTwo(stack_size_);
|
|
||||||
}
|
|
||||||
|
|
||||||
void FakeStack::AllocateOneSizeClass(uptr size_class) {
|
|
||||||
CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
|
|
||||||
uptr new_mem = (uptr)MmapOrDie(
|
|
||||||
ClassMmapSize(size_class), __FUNCTION__);
|
|
||||||
// Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
|
|
||||||
// asanThreadRegistry().GetCurrent()->tid(),
|
|
||||||
// size_class, new_mem, new_mem + ClassMmapSize(size_class),
|
|
||||||
// ClassMmapSize(size_class));
|
|
||||||
uptr i;
|
|
||||||
for (i = 0; i < ClassMmapSize(size_class);
|
|
||||||
i += ClassSize(size_class)) {
|
|
||||||
size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
|
|
||||||
}
|
|
||||||
CHECK(i == ClassMmapSize(size_class));
|
|
||||||
allocated_size_classes_[size_class] = new_mem;
|
|
||||||
}
|
|
||||||
|
|
||||||
uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
|
|
||||||
if (!alive_) return real_stack;
|
|
||||||
CHECK(size <= kMaxStackMallocSize && size > 1);
|
|
||||||
uptr size_class = ComputeSizeClass(size);
|
|
||||||
if (!allocated_size_classes_[size_class]) {
|
|
||||||
AllocateOneSizeClass(size_class);
|
|
||||||
}
|
|
||||||
FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
|
|
||||||
CHECK(fake_frame);
|
|
||||||
fake_frame->size_minus_one = size - 1;
|
|
||||||
fake_frame->real_stack = real_stack;
|
|
||||||
while (FakeFrame *top = call_stack_.top()) {
|
|
||||||
if (top->real_stack > real_stack) break;
|
|
||||||
call_stack_.LifoPop();
|
|
||||||
DeallocateFrame(top);
|
|
||||||
}
|
|
||||||
call_stack_.LifoPush(fake_frame);
|
|
||||||
uptr ptr = (uptr)fake_frame;
|
|
||||||
PoisonShadow(ptr, size, 0);
|
|
||||||
return ptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
|
|
||||||
CHECK(alive_);
|
|
||||||
uptr size = fake_frame->size_minus_one + 1;
|
|
||||||
uptr size_class = ComputeSizeClass(size);
|
|
||||||
CHECK(allocated_size_classes_[size_class]);
|
|
||||||
uptr ptr = (uptr)fake_frame;
|
|
||||||
CHECK(AddrIsInSizeClass(ptr, size_class));
|
|
||||||
CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
|
|
||||||
size_classes_[size_class].FifoPush(fake_frame);
|
|
||||||
}
|
|
||||||
|
|
||||||
void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
|
|
||||||
FakeFrame *fake_frame = (FakeFrame*)ptr;
|
|
||||||
CHECK(fake_frame->magic = kRetiredStackFrameMagic);
|
|
||||||
CHECK(fake_frame->descr != 0);
|
|
||||||
CHECK(fake_frame->size_minus_one == size - 1);
|
|
||||||
PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace __asan
|
} // namespace __asan
|
||||||
|
|
||||||
// ---------------------- Interface ---------------- {{{1
|
// ---------------------- Interface ---------------- {{{1
|
||||||
using namespace __asan; // NOLINT
|
using namespace __asan; // NOLINT
|
||||||
|
|
||||||
uptr __asan_stack_malloc(uptr size, uptr real_stack) {
|
|
||||||
if (!flags()->use_fake_stack) return real_stack;
|
|
||||||
AsanThread *t = asanThreadRegistry().GetCurrent();
|
|
||||||
if (!t) {
|
|
||||||
// TSD is gone, use the real stack.
|
|
||||||
return real_stack;
|
|
||||||
}
|
|
||||||
uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
|
|
||||||
// Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
|
|
||||||
return ptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
|
|
||||||
if (!flags()->use_fake_stack) return;
|
|
||||||
if (ptr != real_stack) {
|
|
||||||
FakeStack::OnFree(ptr, size, real_stack);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ASan allocator doesn't reserve extra bytes, so normally we would
|
// ASan allocator doesn't reserve extra bytes, so normally we would
|
||||||
// just return "size".
|
// just return "size".
|
||||||
uptr __asan_get_estimated_allocated_size(uptr size) {
|
uptr __asan_get_estimated_allocated_size(uptr size) {
|
||||||
|
|
@ -1040,8 +800,9 @@ uptr __asan_get_allocated_size(const void *p) {
|
||||||
uptr allocated_size = malloc_info.AllocationSize((uptr)p);
|
uptr allocated_size = malloc_info.AllocationSize((uptr)p);
|
||||||
// Die if p is not malloced or if it is already freed.
|
// Die if p is not malloced or if it is already freed.
|
||||||
if (allocated_size == 0) {
|
if (allocated_size == 0) {
|
||||||
GET_STACK_TRACE_HERE(kStackTraceMax);
|
GET_STACK_TRACE_FATAL_HERE;
|
||||||
ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
|
ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
|
||||||
}
|
}
|
||||||
return allocated_size;
|
return allocated_size;
|
||||||
}
|
}
|
||||||
|
#endif // ASAN_ALLOCATOR_VERSION
|
||||||
|
|
|
||||||
|
|
@ -15,9 +15,22 @@
|
||||||
|
|
||||||
#include "asan_internal.h"
|
#include "asan_internal.h"
|
||||||
#include "asan_interceptors.h"
|
#include "asan_interceptors.h"
|
||||||
|
#include "sanitizer_common/sanitizer_list.h"
|
||||||
|
|
||||||
|
// We are in the process of transitioning from the old allocator (version 1)
|
||||||
|
// to a new one (version 2). The change is quite intrusive so both allocators
|
||||||
|
// will co-exist in the source base for a while. The actual allocator is chosen
|
||||||
|
// at build time by redefining this macrozz.
|
||||||
|
#define ASAN_ALLOCATOR_VERSION 1
|
||||||
|
|
||||||
namespace __asan {
|
namespace __asan {
|
||||||
|
|
||||||
|
enum AllocType {
|
||||||
|
FROM_MALLOC = 1, // Memory block came from malloc, calloc, realloc, etc.
|
||||||
|
FROM_NEW = 2, // Memory block came from operator new.
|
||||||
|
FROM_NEW_BR = 3 // Memory block came from operator new [ ]
|
||||||
|
};
|
||||||
|
|
||||||
static const uptr kNumberOfSizeClasses = 255;
|
static const uptr kNumberOfSizeClasses = 255;
|
||||||
struct AsanChunk;
|
struct AsanChunk;
|
||||||
|
|
||||||
|
|
@ -32,16 +45,40 @@ class AsanChunkView {
|
||||||
uptr FreeTid();
|
uptr FreeTid();
|
||||||
void GetAllocStack(StackTrace *stack);
|
void GetAllocStack(StackTrace *stack);
|
||||||
void GetFreeStack(StackTrace *stack);
|
void GetFreeStack(StackTrace *stack);
|
||||||
bool AddrIsInside(uptr addr, uptr access_size, uptr *offset);
|
bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
|
||||||
bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset);
|
if (addr >= Beg() && (addr + access_size) <= End()) {
|
||||||
bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset);
|
*offset = addr - Beg();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
|
||||||
|
(void)access_size;
|
||||||
|
if (addr < Beg()) {
|
||||||
|
*offset = Beg() - addr;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
|
||||||
|
if (addr + access_size >= End()) {
|
||||||
|
if (addr <= End())
|
||||||
|
*offset = 0;
|
||||||
|
else
|
||||||
|
*offset = addr - End();
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
AsanChunk *const chunk_;
|
AsanChunk *const chunk_;
|
||||||
};
|
};
|
||||||
|
|
||||||
AsanChunkView FindHeapChunkByAddress(uptr address);
|
AsanChunkView FindHeapChunkByAddress(uptr address);
|
||||||
|
|
||||||
class AsanChunkFifoList {
|
// List of AsanChunks with total size.
|
||||||
|
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
|
||||||
public:
|
public:
|
||||||
explicit AsanChunkFifoList(LinkerInitialized) { }
|
explicit AsanChunkFifoList(LinkerInitialized) { }
|
||||||
AsanChunkFifoList() { clear(); }
|
AsanChunkFifoList() { clear(); }
|
||||||
|
|
@ -50,12 +87,10 @@ class AsanChunkFifoList {
|
||||||
AsanChunk *Pop();
|
AsanChunk *Pop();
|
||||||
uptr size() { return size_; }
|
uptr size() { return size_; }
|
||||||
void clear() {
|
void clear() {
|
||||||
first_ = last_ = 0;
|
IntrusiveList<AsanChunk>::clear();
|
||||||
size_ = 0;
|
size_ = 0;
|
||||||
}
|
}
|
||||||
private:
|
private:
|
||||||
AsanChunk *first_;
|
|
||||||
AsanChunk *last_;
|
|
||||||
uptr size_;
|
uptr size_;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -68,7 +103,11 @@ struct AsanThreadLocalMallocStorage {
|
||||||
}
|
}
|
||||||
|
|
||||||
AsanChunkFifoList quarantine_;
|
AsanChunkFifoList quarantine_;
|
||||||
|
#if ASAN_ALLOCATOR_VERSION == 1
|
||||||
AsanChunk *free_lists_[kNumberOfSizeClasses];
|
AsanChunk *free_lists_[kNumberOfSizeClasses];
|
||||||
|
#else
|
||||||
|
uptr allocator2_cache[1024]; // Opaque.
|
||||||
|
#endif
|
||||||
void CommitBack();
|
void CommitBack();
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
@ -156,8 +195,9 @@ class FakeStack {
|
||||||
FakeFrameLifo call_stack_;
|
FakeFrameLifo call_stack_;
|
||||||
};
|
};
|
||||||
|
|
||||||
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack);
|
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
|
||||||
void asan_free(void *ptr, StackTrace *stack);
|
AllocType alloc_type);
|
||||||
|
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
|
||||||
|
|
||||||
void *asan_malloc(uptr size, StackTrace *stack);
|
void *asan_malloc(uptr size, StackTrace *stack);
|
||||||
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
|
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
|
||||||
|
|
@ -173,5 +213,52 @@ uptr asan_mz_size(const void *ptr);
|
||||||
void asan_mz_force_lock();
|
void asan_mz_force_lock();
|
||||||
void asan_mz_force_unlock();
|
void asan_mz_force_unlock();
|
||||||
|
|
||||||
|
void PrintInternalAllocatorStats();
|
||||||
|
|
||||||
|
// Log2 and RoundUpToPowerOfTwo should be inlined for performance.
|
||||||
|
#if defined(_WIN32) && !defined(__clang__)
|
||||||
|
extern "C" {
|
||||||
|
unsigned char _BitScanForward(unsigned long *index, unsigned long mask); // NOLINT
|
||||||
|
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); // NOLINT
|
||||||
|
#if defined(_WIN64)
|
||||||
|
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask); // NOLINT
|
||||||
|
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); // NOLINT
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static inline uptr Log2(uptr x) {
|
||||||
|
CHECK(IsPowerOfTwo(x));
|
||||||
|
#if !defined(_WIN32) || defined(__clang__)
|
||||||
|
return __builtin_ctzl(x);
|
||||||
|
#elif defined(_WIN64)
|
||||||
|
unsigned long ret; // NOLINT
|
||||||
|
_BitScanForward64(&ret, x);
|
||||||
|
return ret;
|
||||||
|
#else
|
||||||
|
unsigned long ret; // NOLINT
|
||||||
|
_BitScanForward(&ret, x);
|
||||||
|
return ret;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline uptr RoundUpToPowerOfTwo(uptr size) {
|
||||||
|
CHECK(size);
|
||||||
|
if (IsPowerOfTwo(size)) return size;
|
||||||
|
|
||||||
|
unsigned long up; // NOLINT
|
||||||
|
#if !defined(_WIN32) || defined(__clang__)
|
||||||
|
up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
|
||||||
|
#elif defined(_WIN64)
|
||||||
|
_BitScanReverse64(&up, size);
|
||||||
|
#else
|
||||||
|
_BitScanReverse(&up, size);
|
||||||
|
#endif
|
||||||
|
CHECK(size < (1ULL << (up + 1)));
|
||||||
|
CHECK(size > (1ULL << up));
|
||||||
|
return 1UL << (up + 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
} // namespace __asan
|
} // namespace __asan
|
||||||
#endif // ASAN_ALLOCATOR_H
|
#endif // ASAN_ALLOCATOR_H
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,714 @@
|
||||||
|
//===-- asan_allocator2.cc ------------------------------------------------===//
|
||||||
|
//
|
||||||
|
// This file is distributed under the University of Illinois Open Source
|
||||||
|
// License. See LICENSE.TXT for details.
|
||||||
|
//
|
||||||
|
//===----------------------------------------------------------------------===//
|
||||||
|
//
|
||||||
|
// This file is a part of AddressSanitizer, an address sanity checker.
|
||||||
|
//
|
||||||
|
// Implementation of ASan's memory allocator, 2-nd version.
|
||||||
|
// This variant uses the allocator from sanitizer_common, i.e. the one shared
|
||||||
|
// with ThreadSanitizer and MemorySanitizer.
|
||||||
|
//
|
||||||
|
// Status: under development, not enabled by default yet.
|
||||||
|
//===----------------------------------------------------------------------===//
|
||||||
|
#include "asan_allocator.h"
|
||||||
|
#if ASAN_ALLOCATOR_VERSION == 2
|
||||||
|
|
||||||
|
#include "asan_mapping.h"
|
||||||
|
#include "asan_report.h"
|
||||||
|
#include "asan_thread.h"
|
||||||
|
#include "asan_thread_registry.h"
|
||||||
|
#include "sanitizer/asan_interface.h"
|
||||||
|
#include "sanitizer_common/sanitizer_allocator.h"
|
||||||
|
#include "sanitizer_common/sanitizer_internal_defs.h"
|
||||||
|
#include "sanitizer_common/sanitizer_list.h"
|
||||||
|
#include "sanitizer_common/sanitizer_stackdepot.h"
|
||||||
|
|
||||||
|
namespace __asan {
|
||||||
|
|
||||||
|
struct AsanMapUnmapCallback {
|
||||||
|
void OnMap(uptr p, uptr size) const {
|
||||||
|
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
|
||||||
|
// Statistics.
|
||||||
|
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
|
||||||
|
thread_stats.mmaps++;
|
||||||
|
thread_stats.mmaped += size;
|
||||||
|
}
|
||||||
|
void OnUnmap(uptr p, uptr size) const {
|
||||||
|
PoisonShadow(p, size, 0);
|
||||||
|
// We are about to unmap a chunk of user memory.
|
||||||
|
// Mark the corresponding shadow memory as not needed.
|
||||||
|
// Since asan's mapping is compacting, the shadow chunk may be
|
||||||
|
// not page-aligned, so we only flush the page-aligned portion.
|
||||||
|
uptr page_size = GetPageSizeCached();
|
||||||
|
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
|
||||||
|
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
|
||||||
|
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
|
||||||
|
// Statistics.
|
||||||
|
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
|
||||||
|
thread_stats.munmaps++;
|
||||||
|
thread_stats.munmaped += size;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
#if SANITIZER_WORDSIZE == 64
|
||||||
|
const uptr kAllocatorSpace = 0x600000000000ULL;
|
||||||
|
const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
|
||||||
|
typedef DefaultSizeClassMap SizeClassMap;
|
||||||
|
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
|
||||||
|
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
|
||||||
|
#elif SANITIZER_WORDSIZE == 32
|
||||||
|
static const u64 kAddressSpaceSize = 1ULL << 32;
|
||||||
|
typedef CompactSizeClassMap SizeClassMap;
|
||||||
|
typedef SizeClassAllocator32<0, kAddressSpaceSize, 16,
|
||||||
|
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
|
||||||
|
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
|
||||||
|
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
|
||||||
|
SecondaryAllocator> Allocator;
|
||||||
|
|
||||||
|
// We can not use THREADLOCAL because it is not supported on some of the
|
||||||
|
// platforms we care about (OSX 10.6, Android).
|
||||||
|
// static THREADLOCAL AllocatorCache cache;
|
||||||
|
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
|
||||||
|
CHECK(ms);
|
||||||
|
CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator2_cache));
|
||||||
|
return reinterpret_cast<AllocatorCache *>(ms->allocator2_cache);
|
||||||
|
}
|
||||||
|
|
||||||
|
static Allocator allocator;
|
||||||
|
|
||||||
|
static const uptr kMaxAllowedMallocSize =
|
||||||
|
FIRST_32_SECOND_64(3UL << 30, 8UL << 30);
|
||||||
|
|
||||||
|
static const uptr kMaxThreadLocalQuarantine =
|
||||||
|
FIRST_32_SECOND_64(1 << 18, 1 << 20);
|
||||||
|
|
||||||
|
static const uptr kReturnOnZeroMalloc = 2048; // Zero page is protected.
|
||||||
|
|
||||||
|
static int inited = 0;
|
||||||
|
|
||||||
|
static void Init() {
|
||||||
|
if (inited) return;
|
||||||
|
__asan_init();
|
||||||
|
inited = true; // this must happen before any threads are created.
|
||||||
|
allocator.Init();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Every chunk of memory allocated by this allocator can be in one of 3 states:
|
||||||
|
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
|
||||||
|
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
|
||||||
|
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
|
||||||
|
enum {
|
||||||
|
CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
|
||||||
|
CHUNK_ALLOCATED = 2,
|
||||||
|
CHUNK_QUARANTINE = 3
|
||||||
|
};
|
||||||
|
|
||||||
|
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
|
||||||
|
// We use adaptive redzones: for larger allocation larger redzones are used.
|
||||||
|
static u32 RZLog2Size(u32 rz_log) {
|
||||||
|
CHECK_LT(rz_log, 8);
|
||||||
|
return 16 << rz_log;
|
||||||
|
}
|
||||||
|
|
||||||
|
static u32 RZSize2Log(u32 rz_size) {
|
||||||
|
CHECK_GE(rz_size, 16);
|
||||||
|
CHECK_LE(rz_size, 2048);
|
||||||
|
CHECK(IsPowerOfTwo(rz_size));
|
||||||
|
u32 res = __builtin_ctz(rz_size) - 4;
|
||||||
|
CHECK_EQ(rz_size, RZLog2Size(res));
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
static uptr ComputeRZLog(uptr user_requested_size) {
|
||||||
|
u32 rz_log =
|
||||||
|
user_requested_size <= 64 - 16 ? 0 :
|
||||||
|
user_requested_size <= 128 - 32 ? 1 :
|
||||||
|
user_requested_size <= 512 - 64 ? 2 :
|
||||||
|
user_requested_size <= 4096 - 128 ? 3 :
|
||||||
|
user_requested_size <= (1 << 14) - 256 ? 4 :
|
||||||
|
user_requested_size <= (1 << 15) - 512 ? 5 :
|
||||||
|
user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
|
||||||
|
return Max(rz_log, RZSize2Log(flags()->redzone));
|
||||||
|
}
|
||||||
|
|
||||||
|
// The memory chunk allocated from the underlying allocator looks like this:
|
||||||
|
// L L L L L L H H U U U U U U R R
|
||||||
|
// L -- left redzone words (0 or more bytes)
|
||||||
|
// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
|
||||||
|
// U -- user memory.
|
||||||
|
// R -- right redzone (0 or more bytes)
|
||||||
|
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
|
||||||
|
// memory.
|
||||||
|
|
||||||
|
// If a memory chunk is allocated by memalign and we had to increase the
|
||||||
|
// allocation size to achieve the proper alignment, then we store this magic
|
||||||
|
// value in the first uptr word of the memory block and store the address of
|
||||||
|
// ChunkBase in the next uptr.
|
||||||
|
// M B ? ? ? L L L L L L H H U U U U U U
|
||||||
|
// M -- magic value kMemalignMagic
|
||||||
|
// B -- address of ChunkHeader pointing to the first 'H'
|
||||||
|
static const uptr kMemalignMagic = 0xCC6E96B9;
|
||||||
|
|
||||||
|
struct ChunkHeader {
|
||||||
|
// 1-st 8 bytes.
|
||||||
|
u32 chunk_state : 8; // Must be first.
|
||||||
|
u32 alloc_tid : 24;
|
||||||
|
|
||||||
|
u32 free_tid : 24;
|
||||||
|
u32 from_memalign : 1;
|
||||||
|
u32 alloc_type : 2;
|
||||||
|
u32 rz_log : 3;
|
||||||
|
// 2-nd 8 bytes
|
||||||
|
// This field is used for small sizes. For large sizes it is equal to
|
||||||
|
// SizeClassMap::kMaxSize and the actual size is stored in the
|
||||||
|
// SecondaryAllocator's metadata.
|
||||||
|
u32 user_requested_size;
|
||||||
|
u32 alloc_context_id;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct ChunkBase : ChunkHeader {
|
||||||
|
// Header2, intersects with user memory.
|
||||||
|
AsanChunk *next;
|
||||||
|
u32 free_context_id;
|
||||||
|
};
|
||||||
|
|
||||||
|
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
|
||||||
|
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
|
||||||
|
COMPILER_CHECK(kChunkHeaderSize == 16);
|
||||||
|
COMPILER_CHECK(kChunkHeader2Size <= 16);
|
||||||
|
|
||||||
|
struct AsanChunk: ChunkBase {
|
||||||
|
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
|
||||||
|
uptr UsedSize() {
|
||||||
|
if (user_requested_size != SizeClassMap::kMaxSize)
|
||||||
|
return user_requested_size;
|
||||||
|
return *reinterpret_cast<uptr *>(allocator.GetMetaData(AllocBeg()));
|
||||||
|
}
|
||||||
|
void *AllocBeg() {
|
||||||
|
if (from_memalign)
|
||||||
|
return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
|
||||||
|
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
|
||||||
|
}
|
||||||
|
// We store the alloc/free stack traces in the chunk itself.
|
||||||
|
u32 *AllocStackBeg() {
|
||||||
|
return (u32*)(Beg() - RZLog2Size(rz_log));
|
||||||
|
}
|
||||||
|
uptr AllocStackSize() {
|
||||||
|
CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
|
||||||
|
return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
|
||||||
|
}
|
||||||
|
u32 *FreeStackBeg() {
|
||||||
|
return (u32*)(Beg() + kChunkHeader2Size);
|
||||||
|
}
|
||||||
|
uptr FreeStackSize() {
|
||||||
|
if (user_requested_size < kChunkHeader2Size) return 0;
|
||||||
|
uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
|
||||||
|
return (available - kChunkHeader2Size) / sizeof(u32);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
|
||||||
|
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
|
||||||
|
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
|
||||||
|
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
|
||||||
|
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
|
||||||
|
|
||||||
|
static void GetStackTraceFromId(u32 id, StackTrace *stack) {
|
||||||
|
CHECK(id);
|
||||||
|
uptr size = 0;
|
||||||
|
const uptr *trace = StackDepotGet(id, &size);
|
||||||
|
CHECK_LT(size, kStackTraceMax);
|
||||||
|
internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
|
||||||
|
stack->size = size;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AsanChunkView::GetAllocStack(StackTrace *stack) {
|
||||||
|
if (flags()->use_stack_depot)
|
||||||
|
GetStackTraceFromId(chunk_->alloc_context_id, stack);
|
||||||
|
else
|
||||||
|
StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
|
||||||
|
chunk_->AllocStackSize());
|
||||||
|
}
|
||||||
|
|
||||||
|
void AsanChunkView::GetFreeStack(StackTrace *stack) {
|
||||||
|
if (flags()->use_stack_depot)
|
||||||
|
GetStackTraceFromId(chunk_->free_context_id, stack);
|
||||||
|
else
|
||||||
|
StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
|
||||||
|
chunk_->FreeStackSize());
|
||||||
|
}
|
||||||
|
|
||||||
|
class Quarantine: public AsanChunkFifoList {
|
||||||
|
public:
|
||||||
|
void SwallowThreadLocalQuarantine(AsanThreadLocalMallocStorage *ms) {
|
||||||
|
AsanChunkFifoList *q = &ms->quarantine_;
|
||||||
|
if (!q->size()) return;
|
||||||
|
SpinMutexLock l(&mutex_);
|
||||||
|
PushList(q);
|
||||||
|
PopAndDeallocateLoop(ms);
|
||||||
|
}
|
||||||
|
|
||||||
|
void BypassThreadLocalQuarantine(AsanChunk *m) {
|
||||||
|
SpinMutexLock l(&mutex_);
|
||||||
|
Push(m);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
void PopAndDeallocateLoop(AsanThreadLocalMallocStorage *ms) {
|
||||||
|
while (size() > (uptr)flags()->quarantine_size) {
|
||||||
|
PopAndDeallocate(ms);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
void PopAndDeallocate(AsanThreadLocalMallocStorage *ms) {
|
||||||
|
CHECK_GT(size(), 0);
|
||||||
|
AsanChunk *m = Pop();
|
||||||
|
CHECK(m);
|
||||||
|
CHECK(m->chunk_state == CHUNK_QUARANTINE);
|
||||||
|
m->chunk_state = CHUNK_AVAILABLE;
|
||||||
|
CHECK_NE(m->alloc_tid, kInvalidTid);
|
||||||
|
CHECK_NE(m->free_tid, kInvalidTid);
|
||||||
|
PoisonShadow(m->Beg(),
|
||||||
|
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
|
||||||
|
kAsanHeapLeftRedzoneMagic);
|
||||||
|
void *p = reinterpret_cast<void *>(m->AllocBeg());
|
||||||
|
if (m->from_memalign) {
|
||||||
|
uptr *memalign_magic = reinterpret_cast<uptr *>(p);
|
||||||
|
CHECK_EQ(memalign_magic[0], kMemalignMagic);
|
||||||
|
CHECK_EQ(memalign_magic[1], reinterpret_cast<uptr>(m));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Statistics.
|
||||||
|
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
|
||||||
|
thread_stats.real_frees++;
|
||||||
|
thread_stats.really_freed += m->UsedSize();
|
||||||
|
|
||||||
|
allocator.Deallocate(GetAllocatorCache(ms), p);
|
||||||
|
}
|
||||||
|
SpinMutex mutex_;
|
||||||
|
};
|
||||||
|
|
||||||
|
static Quarantine quarantine;
|
||||||
|
|
||||||
|
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
|
||||||
|
CHECK(q->size() > 0);
|
||||||
|
size_ += q->size();
|
||||||
|
append_back(q);
|
||||||
|
q->clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
void AsanChunkFifoList::Push(AsanChunk *n) {
|
||||||
|
push_back(n);
|
||||||
|
size_ += n->UsedSize();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interesting performance observation: this function takes up to 15% of overal
|
||||||
|
// allocator time. That's because *first_ has been evicted from cache long time
|
||||||
|
// ago. Not sure if we can or want to do anything with this.
|
||||||
|
AsanChunk *AsanChunkFifoList::Pop() {
|
||||||
|
CHECK(first_);
|
||||||
|
AsanChunk *res = front();
|
||||||
|
size_ -= res->UsedSize();
|
||||||
|
pop_front();
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
|
||||||
|
AllocType alloc_type) {
|
||||||
|
Init();
|
||||||
|
CHECK(stack);
|
||||||
|
const uptr min_alignment = SHADOW_GRANULARITY;
|
||||||
|
if (alignment < min_alignment)
|
||||||
|
alignment = min_alignment;
|
||||||
|
if (size == 0) {
|
||||||
|
if (alignment <= kReturnOnZeroMalloc)
|
||||||
|
return reinterpret_cast<void *>(kReturnOnZeroMalloc);
|
||||||
|
else
|
||||||
|
return 0; // 0 bytes with large alignment requested. Just return 0.
|
||||||
|
}
|
||||||
|
CHECK(IsPowerOfTwo(alignment));
|
||||||
|
uptr rz_log = ComputeRZLog(size);
|
||||||
|
uptr rz_size = RZLog2Size(rz_log);
|
||||||
|
uptr rounded_size = RoundUpTo(size, alignment);
|
||||||
|
if (rounded_size < kChunkHeader2Size)
|
||||||
|
rounded_size = kChunkHeader2Size;
|
||||||
|
uptr needed_size = rounded_size + rz_size;
|
||||||
|
if (alignment > min_alignment)
|
||||||
|
needed_size += alignment;
|
||||||
|
bool using_primary_allocator = true;
|
||||||
|
// If we are allocating from the secondary allocator, there will be no
|
||||||
|
// automatic right redzone, so add the right redzone manually.
|
||||||
|
if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
|
||||||
|
needed_size += rz_size;
|
||||||
|
using_primary_allocator = false;
|
||||||
|
}
|
||||||
|
CHECK(IsAligned(needed_size, min_alignment));
|
||||||
|
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
|
||||||
|
Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
|
||||||
|
(void*)size);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
AsanThread *t = asanThreadRegistry().GetCurrent();
|
||||||
|
AllocatorCache *cache = t ? GetAllocatorCache(&t->malloc_storage()) : 0;
|
||||||
|
void *allocated = allocator.Allocate(cache, needed_size, 8, false);
|
||||||
|
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
|
||||||
|
uptr alloc_end = alloc_beg + needed_size;
|
||||||
|
uptr beg_plus_redzone = alloc_beg + rz_size;
|
||||||
|
uptr user_beg = beg_plus_redzone;
|
||||||
|
if (!IsAligned(user_beg, alignment))
|
||||||
|
user_beg = RoundUpTo(user_beg, alignment);
|
||||||
|
uptr user_end = user_beg + size;
|
||||||
|
CHECK_LE(user_end, alloc_end);
|
||||||
|
uptr chunk_beg = user_beg - kChunkHeaderSize;
|
||||||
|
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
|
||||||
|
m->chunk_state = CHUNK_ALLOCATED;
|
||||||
|
m->alloc_type = alloc_type;
|
||||||
|
m->rz_log = rz_log;
|
||||||
|
u32 alloc_tid = t ? t->tid() : 0;
|
||||||
|
m->alloc_tid = alloc_tid;
|
||||||
|
CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
|
||||||
|
m->free_tid = kInvalidTid;
|
||||||
|
m->from_memalign = user_beg != beg_plus_redzone;
|
||||||
|
if (m->from_memalign) {
|
||||||
|
CHECK_LE(beg_plus_redzone + 2 * sizeof(uptr), user_beg);
|
||||||
|
uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
|
||||||
|
memalign_magic[0] = kMemalignMagic;
|
||||||
|
memalign_magic[1] = chunk_beg;
|
||||||
|
}
|
||||||
|
if (using_primary_allocator) {
|
||||||
|
CHECK(size);
|
||||||
|
m->user_requested_size = size;
|
||||||
|
CHECK(allocator.FromPrimary(allocated));
|
||||||
|
} else {
|
||||||
|
CHECK(!allocator.FromPrimary(allocated));
|
||||||
|
m->user_requested_size = SizeClassMap::kMaxSize;
|
||||||
|
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
|
||||||
|
meta[0] = size;
|
||||||
|
meta[1] = chunk_beg;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (flags()->use_stack_depot) {
|
||||||
|
m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
|
||||||
|
} else {
|
||||||
|
m->alloc_context_id = 0;
|
||||||
|
StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
|
||||||
|
}
|
||||||
|
|
||||||
|
uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
|
||||||
|
// Unpoison the bulk of the memory region.
|
||||||
|
if (size_rounded_down_to_granularity)
|
||||||
|
PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
|
||||||
|
// Deal with the end of the region if size is not aligned to granularity.
|
||||||
|
if (size != size_rounded_down_to_granularity && flags()->poison_heap) {
|
||||||
|
u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
|
||||||
|
*shadow = size & (SHADOW_GRANULARITY - 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
|
||||||
|
thread_stats.mallocs++;
|
||||||
|
thread_stats.malloced += size;
|
||||||
|
thread_stats.malloced_redzones += needed_size - size;
|
||||||
|
uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
|
||||||
|
thread_stats.malloced_by_size[class_id]++;
|
||||||
|
if (needed_size > SizeClassMap::kMaxSize)
|
||||||
|
thread_stats.malloc_large++;
|
||||||
|
|
||||||
|
void *res = reinterpret_cast<void *>(user_beg);
|
||||||
|
ASAN_MALLOC_HOOK(res, size);
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
|
||||||
|
uptr p = reinterpret_cast<uptr>(ptr);
|
||||||
|
if (p == 0 || p == kReturnOnZeroMalloc) return;
|
||||||
|
uptr chunk_beg = p - kChunkHeaderSize;
|
||||||
|
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
|
||||||
|
|
||||||
|
// Flip the chunk_state atomically to avoid race on double-free.
|
||||||
|
u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
|
||||||
|
memory_order_acq_rel);
|
||||||
|
|
||||||
|
if (old_chunk_state == CHUNK_QUARANTINE)
|
||||||
|
ReportDoubleFree((uptr)ptr, stack);
|
||||||
|
else if (old_chunk_state != CHUNK_ALLOCATED)
|
||||||
|
ReportFreeNotMalloced((uptr)ptr, stack);
|
||||||
|
CHECK(old_chunk_state == CHUNK_ALLOCATED);
|
||||||
|
if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
|
||||||
|
ReportAllocTypeMismatch((uptr)ptr, stack,
|
||||||
|
(AllocType)m->alloc_type, (AllocType)alloc_type);
|
||||||
|
|
||||||
|
CHECK_GE(m->alloc_tid, 0);
|
||||||
|
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
|
||||||
|
CHECK_EQ(m->free_tid, kInvalidTid);
|
||||||
|
AsanThread *t = asanThreadRegistry().GetCurrent();
|
||||||
|
m->free_tid = t ? t->tid() : 0;
|
||||||
|
if (flags()->use_stack_depot) {
|
||||||
|
m->free_context_id = StackDepotPut(stack->trace, stack->size);
|
||||||
|
} else {
|
||||||
|
m->free_context_id = 0;
|
||||||
|
StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
|
||||||
|
}
|
||||||
|
CHECK(m->chunk_state == CHUNK_QUARANTINE);
|
||||||
|
// Poison the region.
|
||||||
|
PoisonShadow(m->Beg(),
|
||||||
|
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
|
||||||
|
kAsanHeapFreeMagic);
|
||||||
|
|
||||||
|
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
|
||||||
|
thread_stats.frees++;
|
||||||
|
thread_stats.freed += m->UsedSize();
|
||||||
|
|
||||||
|
// Push into quarantine.
|
||||||
|
if (t) {
|
||||||
|
AsanChunkFifoList &q = t->malloc_storage().quarantine_;
|
||||||
|
q.Push(m);
|
||||||
|
|
||||||
|
if (q.size() > kMaxThreadLocalQuarantine)
|
||||||
|
quarantine.SwallowThreadLocalQuarantine(&t->malloc_storage());
|
||||||
|
} else {
|
||||||
|
quarantine.BypassThreadLocalQuarantine(m);
|
||||||
|
}
|
||||||
|
|
||||||
|
ASAN_FREE_HOOK(ptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) {
|
||||||
|
CHECK(old_ptr && new_size);
|
||||||
|
uptr p = reinterpret_cast<uptr>(old_ptr);
|
||||||
|
uptr chunk_beg = p - kChunkHeaderSize;
|
||||||
|
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
|
||||||
|
|
||||||
|
AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
|
||||||
|
thread_stats.reallocs++;
|
||||||
|
thread_stats.realloced += new_size;
|
||||||
|
|
||||||
|
CHECK(m->chunk_state == CHUNK_ALLOCATED);
|
||||||
|
uptr old_size = m->UsedSize();
|
||||||
|
uptr memcpy_size = Min(new_size, old_size);
|
||||||
|
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
|
||||||
|
if (new_ptr) {
|
||||||
|
CHECK(REAL(memcpy) != 0);
|
||||||
|
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
|
||||||
|
Deallocate(old_ptr, stack, FROM_MALLOC);
|
||||||
|
}
|
||||||
|
return new_ptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static AsanChunk *GetAsanChunkByAddr(uptr p) {
|
||||||
|
void *ptr = reinterpret_cast<void *>(p);
|
||||||
|
uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
|
||||||
|
if (!alloc_beg) return 0;
|
||||||
|
uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
|
||||||
|
if (memalign_magic[0] == kMemalignMagic) {
|
||||||
|
AsanChunk *m = reinterpret_cast<AsanChunk *>(memalign_magic[1]);
|
||||||
|
CHECK(m->from_memalign);
|
||||||
|
return m;
|
||||||
|
}
|
||||||
|
if (!allocator.FromPrimary(ptr)) {
|
||||||
|
uptr *meta = reinterpret_cast<uptr *>(
|
||||||
|
allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
|
||||||
|
AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
|
||||||
|
return m;
|
||||||
|
}
|
||||||
|
uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
|
||||||
|
CHECK_LE(actual_size, SizeClassMap::kMaxSize);
|
||||||
|
// We know the actually allocted size, but we don't know the redzone size.
|
||||||
|
// Just try all possible redzone sizes.
|
||||||
|
for (u32 rz_log = 0; rz_log < 8; rz_log++) {
|
||||||
|
u32 rz_size = RZLog2Size(rz_log);
|
||||||
|
uptr max_possible_size = actual_size - rz_size;
|
||||||
|
if (ComputeRZLog(max_possible_size) != rz_log)
|
||||||
|
continue;
|
||||||
|
return reinterpret_cast<AsanChunk *>(
|
||||||
|
alloc_beg + rz_size - kChunkHeaderSize);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
+static uptr AllocationSize(uptr p) {
+  AsanChunk *m = GetAsanChunkByAddr(p);
+  if (!m) return 0;
+  if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+  if (m->Beg() != p) return 0;
+  return m->UsedSize();
+}
+
+// We have an address between two chunks, and we want to report just one.
+AsanChunk *ChooseChunk(uptr addr,
+                       AsanChunk *left_chunk, AsanChunk *right_chunk) {
+  // Prefer an allocated chunk over freed chunk and freed chunk
+  // over available chunk.
+  if (left_chunk->chunk_state != right_chunk->chunk_state) {
+    if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+      return left_chunk;
+    if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+      return right_chunk;
+    if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+      return left_chunk;
+    if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+      return right_chunk;
+  }
+  // Same chunk_state: choose based on offset.
+  uptr l_offset = 0, r_offset = 0;
+  CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
+  CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
+  if (l_offset < r_offset)
+    return left_chunk;
+  return right_chunk;
+}
+
+AsanChunkView FindHeapChunkByAddress(uptr addr) {
+  AsanChunk *m1 = GetAsanChunkByAddr(addr);
+  if (!m1) return AsanChunkView(m1);
+  uptr offset = 0;
+  if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+    // The address is in the chunk's left redzone, so maybe it is actually
+    // a right buffer overflow from the other chunk to the left.
+    // Search a bit to the left to see if there is another chunk.
+    AsanChunk *m2 = 0;
+    for (uptr l = 1; l < GetPageSizeCached(); l++) {
+      m2 = GetAsanChunkByAddr(addr - l);
+      if (m2 == m1) continue;  // Still the same chunk.
+      break;
+    }
+    if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
+      m1 = ChooseChunk(addr, m2, m1);
+  }
+  return AsanChunkView(m1);
+}
+
+void AsanThreadLocalMallocStorage::CommitBack() {
+  quarantine.SwallowThreadLocalQuarantine(this);
+  allocator.SwallowCache(GetAllocatorCache(this));
+}
+
+void PrintInternalAllocatorStats() {
+  allocator.PrintStats();
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
+                    AllocType alloc_type) {
+  return Allocate(size, alignment, stack, alloc_type);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
+  Deallocate(ptr, stack, alloc_type);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *asan_malloc(uptr size, StackTrace *stack) {
+  return Allocate(size, 8, stack, FROM_MALLOC);
+}
+
+void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+  void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
+  if (ptr)
+    REAL(memset)(ptr, 0, nmemb * size);
+  return ptr;
+}
+
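Note that `nmemb * size` above is an unchecked multiplication; in portable C++ the caller-side guard against wraparound looks like this (a general sketch, not part of this diff):

    #include <cstddef>
    #include <cstdlib>

    // Overflow-safe wrapper: refuse the allocation when nmemb * size would wrap.
    void *checked_calloc(size_t nmemb, size_t size) {
      if (size != 0 && nmemb > (size_t)-1 / size) return nullptr;  // would overflow
      return calloc(nmemb, size);
    }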
+void *asan_realloc(void *p, uptr size, StackTrace *stack) {
+  if (p == 0)
+    return Allocate(size, 8, stack, FROM_MALLOC);
+  if (size == 0) {
+    Deallocate(p, stack, FROM_MALLOC);
+    return 0;
+  }
+  return Reallocate(p, size, stack);
+}
+
+void *asan_valloc(uptr size, StackTrace *stack) {
+  return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC);
+}
+
+void *asan_pvalloc(uptr size, StackTrace *stack) {
+  uptr PageSize = GetPageSizeCached();
+  size = RoundUpTo(size, PageSize);
+  if (size == 0) {
+    // pvalloc(0) should allocate one page.
+    size = PageSize;
+  }
+  return Allocate(size, PageSize, stack, FROM_MALLOC);
+}
+
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                        StackTrace *stack) {
+  void *ptr = Allocate(size, alignment, stack, FROM_MALLOC);
+  CHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
+uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
+  CHECK(stack);
+  if (ptr == 0) return 0;
+  uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
+  if (flags()->check_malloc_usable_size && (usable_size == 0))
+    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
+  return usable_size;
+}
+
+uptr asan_mz_size(const void *ptr) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+void asan_mz_force_lock() {
+  UNIMPLEMENTED();
+}
+
+void asan_mz_force_unlock() {
+  UNIMPLEMENTED();
+}
+
+}  // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan;  // NOLINT
+
+// ASan allocator doesn't reserve extra bytes, so normally we would
+// just return "size". We don't want to expose our redzone sizes, etc. here.
+uptr __asan_get_estimated_allocated_size(uptr size) {
+  return size;
+}
+
+bool __asan_get_ownership(const void *p) {
+  return AllocationSize(reinterpret_cast<uptr>(p)) > 0;
+}
+
+uptr __asan_get_allocated_size(const void *p) {
+  if (p == 0) return 0;
+  uptr allocated_size = AllocationSize(reinterpret_cast<uptr>(p));
+  // Die if p is not malloced or if it is already freed.
+  if (allocated_size == 0) {
+    GET_STACK_TRACE_FATAL_HERE;
+    ReportAsanGetAllocatedSizeNotOwned(reinterpret_cast<uptr>(p), &stack);
+  }
+  return allocated_size;
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_malloc_hook(void *ptr, uptr size) {
+  (void)ptr;
+  (void)size;
+}
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_free_hook(void *ptr) {
+  (void)ptr;
+}
+}  // extern "C"
+#endif
+
+#endif  // ASAN_ALLOCATOR_VERSION
@@ -0,0 +1,180 @@
+//===-- asan_fake_stack.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// FakeStack is used to detect use-after-return bugs.
+//===----------------------------------------------------------------------===//
+#include "asan_allocator.h"
+#include "asan_thread.h"
+#include "asan_thread_registry.h"
+#include "sanitizer/asan_interface.h"
+
+namespace __asan {
+
+FakeStack::FakeStack() {
+  CHECK(REAL(memset) != 0);
+  REAL(memset)(this, 0, sizeof(*this));
+}
+
+bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
+  uptr mem = allocated_size_classes_[size_class];
+  uptr size = ClassMmapSize(size_class);
+  bool res = mem && addr >= mem && addr < mem + size;
+  return res;
+}
+
+uptr FakeStack::AddrIsInFakeStack(uptr addr) {
+  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
+  }
+  return 0;
+}
+
+// We may want to compute this during compilation.
+inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
+  uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
+  uptr log = Log2(rounded_size);
+  CHECK(alloc_size <= (1UL << log));
+  if (!(alloc_size > (1UL << (log-1)))) {
+    Printf("alloc_size %zu log %zu\n", alloc_size, log);
+  }
+  CHECK(alloc_size > (1UL << (log-1)));
+  uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
+  CHECK(res < kNumberOfSizeClasses);
+  CHECK(ClassSize(res) >= rounded_size);
+  return res;
+}
+
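ComputeSizeClass maps a frame size to a power-of-two bucket by rounding up and subtracting a minimum log. A self-contained worked example (kMinStackFrameSizeLog = 6 is an assumption for illustration; the real constant lives in the ASan headers):

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    static const uint32_t kMinStackFrameSizeLog = 6;  // assumed: 64-byte minimum frame

    static uint32_t Log2Ceil(uint64_t x) {  // log2 of x rounded up to a power of two
      uint32_t log = 0;
      while ((1ULL << log) < x) log++;
      return log;
    }

    int main() {
      for (uint64_t size : {48, 64, 65, 500, 4096}) {
        uint32_t log = Log2Ceil(size);
        uint32_t klass = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
        // 48 and 64 both land in class 0; 65 -> 1; 500 -> 3; 4096 -> 6.
        printf("frame size %4llu -> rounded 2^%u -> class %u\n",
               (unsigned long long)size, log, klass);
      }
    }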
+void FakeFrameFifo::FifoPush(FakeFrame *node) {
+  CHECK(node);
+  node->next = 0;
+  if (first_ == 0 && last_ == 0) {
+    first_ = last_ = node;
+  } else {
+    CHECK(first_);
+    CHECK(last_);
+    last_->next = node;
+    last_ = node;
+  }
+}
+
+FakeFrame *FakeFrameFifo::FifoPop() {
+  CHECK(first_ && last_ && "Exhausted fake stack");
+  FakeFrame *res = 0;
+  if (first_ == last_) {
+    res = first_;
+    first_ = last_ = 0;
+  } else {
+    res = first_;
+    first_ = first_->next;
+  }
+  return res;
+}
+
+void FakeStack::Init(uptr stack_size) {
+  stack_size_ = stack_size;
+  alive_ = true;
+}
+
+void FakeStack::Cleanup() {
+  alive_ = false;
+  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+    uptr mem = allocated_size_classes_[i];
+    if (mem) {
+      PoisonShadow(mem, ClassMmapSize(i), 0);
+      allocated_size_classes_[i] = 0;
+      UnmapOrDie((void*)mem, ClassMmapSize(i));
+    }
+  }
+}
+
+uptr FakeStack::ClassMmapSize(uptr size_class) {
+  return RoundUpToPowerOfTwo(stack_size_);
+}
+
+void FakeStack::AllocateOneSizeClass(uptr size_class) {
+  CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
+  uptr new_mem = (uptr)MmapOrDie(
+      ClassMmapSize(size_class), __FUNCTION__);
+  // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
+  //        asanThreadRegistry().GetCurrent()->tid(),
+  //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
+  //        ClassMmapSize(size_class));
+  uptr i;
+  for (i = 0; i < ClassMmapSize(size_class);
+       i += ClassSize(size_class)) {
+    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
+  }
+  CHECK(i == ClassMmapSize(size_class));
+  allocated_size_classes_[size_class] = new_mem;
+}
+
+uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
+  if (!alive_) return real_stack;
+  CHECK(size <= kMaxStackMallocSize && size > 1);
+  uptr size_class = ComputeSizeClass(size);
+  if (!allocated_size_classes_[size_class]) {
+    AllocateOneSizeClass(size_class);
+  }
+  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
+  CHECK(fake_frame);
+  fake_frame->size_minus_one = size - 1;
+  fake_frame->real_stack = real_stack;
+  while (FakeFrame *top = call_stack_.top()) {
+    if (top->real_stack > real_stack) break;
+    call_stack_.LifoPop();
+    DeallocateFrame(top);
+  }
+  call_stack_.LifoPush(fake_frame);
+  uptr ptr = (uptr)fake_frame;
+  PoisonShadow(ptr, size, 0);
+  return ptr;
+}
+
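What this machinery buys, in user terms: locals live in heap-backed fake frames, and a frame is retired and poisoned once its function returns, so a dangling pointer to a local is caught at the first access. The classic demonstration (compile with -fsanitize=address and fake stacks enabled; the compiler will warn about returning a local's address, which is exactly the bug being modeled):

    int *leak_local() {
      int x = 42;     // with use_fake_stack, x lives in a FakeFrame
      return &x;      // the frame is retired and poisoned on return
    }

    int main() {
      int *p = leak_local();
      return *p;      // ASan flags this load as use-after-return
    }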
+void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
+  CHECK(alive_);
+  uptr size = fake_frame->size_minus_one + 1;
+  uptr size_class = ComputeSizeClass(size);
+  CHECK(allocated_size_classes_[size_class]);
+  uptr ptr = (uptr)fake_frame;
+  CHECK(AddrIsInSizeClass(ptr, size_class));
+  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
+  size_classes_[size_class].FifoPush(fake_frame);
+}
+
+void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
+  FakeFrame *fake_frame = (FakeFrame*)ptr;
+  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
+  CHECK(fake_frame->descr != 0);
+  CHECK(fake_frame->size_minus_one == size - 1);
+  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
+}
+
+}  // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan;  // NOLINT
+
+uptr __asan_stack_malloc(uptr size, uptr real_stack) {
+  if (!flags()->use_fake_stack) return real_stack;
+  AsanThread *t = asanThreadRegistry().GetCurrent();
+  if (!t) {
+    // TSD is gone, use the real stack.
+    return real_stack;
+  }
+  uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
+  // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
+  return ptr;
+}
+
+void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
+  if (!flags()->use_fake_stack) return;
+  if (ptr != real_stack) {
+    FakeStack::OnFree(ptr, size, real_stack);
+  }
+}
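These two entry points are invoked from compiler-inserted prologue and epilogue code, not from user source. A hand-written approximation of the shape of that instrumentation (a sketch only: the actual generated code differs, and linking this requires the ASan runtime):

    typedef unsigned long uptr;  // matches the sanitizer's uptr on LP64
    extern "C" uptr __asan_stack_malloc(uptr size, uptr real_stack);
    extern "C" void __asan_stack_free(uptr ptr, uptr size, uptr real_stack);

    void instrumented_function() {
      char real_frame[96];                               // the native stack slot
      uptr real_stack = (uptr)&real_frame[0];
      uptr frame = __asan_stack_malloc(96, real_stack);  // fake frame if enabled
      // ... locals are addressed relative to `frame`; redzones are poisoned ...
      __asan_stack_free(frame, 96, real_stack);          // no-op if frame == real_stack
    }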
@@ -43,7 +43,7 @@ struct Flags {
   int report_globals;
   // If set, attempts to catch initialization order issues.
   bool check_initialization_order;
-  // Max number of stack frames kept for each allocation.
+  // Max number of stack frames kept for each allocation/deallocation.
   int malloc_context_size;
   // If set, uses custom wrappers and replacements for libc string functions
   // to find more errors.
@@ -93,6 +93,17 @@ struct Flags {
   bool print_full_thread_history;
   // ASan will write logs to "log_path.pid" instead of stderr.
   const char *log_path;
+  // Use fast (frame-pointer-based) unwinder on fatal errors (if available).
+  bool fast_unwind_on_fatal;
+  // Use fast (frame-pointer-based) unwinder on malloc/free (if available).
+  bool fast_unwind_on_malloc;
+  // Poison (or not) the heap memory on [de]allocation. Zero value is useful
+  // for benchmarking the allocator or the instrumentation.
+  bool poison_heap;
+  // Report errors on malloc/delete, new/free, new/delete[], etc.
+  bool alloc_dealloc_mismatch;
+  // Use stack depot instead of storing stacks in the redzones.
+  bool use_stack_depot;
 };
 
 Flags *flags();
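These runtime flags are read at startup from the ASAN_OPTIONS environment variable as colon-separated name=value pairs, so the new knobs above can be toggled without rebuilding, e.g.:

    ASAN_OPTIONS=poison_heap=0:alloc_dealloc_mismatch=1:fast_unwind_on_malloc=0 ./a.out

Setting poison_heap=0 is the benchmarking mode the comment above describes: the allocator runs with instrumentation but without shadow poisoning.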
@@ -14,6 +14,7 @@
 
 #include "asan_internal.h"
 #include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
 
 using __sanitizer::uptr;
 
@@ -39,8 +40,10 @@ using __sanitizer::uptr;
 
 #if defined(__linux__)
 # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
+# define ASAN_INTERCEPT_PRCTL 1
 #else
 # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
+# define ASAN_INTERCEPT_PRCTL 0
 #endif
 
 #if !defined(__APPLE__)
@@ -149,10 +152,23 @@ DECLARE_FUNCTION_AND_WRAPPER(long long, atoll, const char *nptr);  // NOLINT
 DECLARE_FUNCTION_AND_WRAPPER(long long, strtoll, const char *nptr, char **endptr, int base);  // NOLINT
 # endif
 
+// unistd.h
+# if SANITIZER_INTERCEPT_READ
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, read, int fd, void *buf, SIZE_T count);
+# endif
+# if SANITIZER_INTERCEPT_PREAD
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread, int fd, void *buf,
+                             SIZE_T count, OFF_T offset);
+# endif
+# if SANITIZER_INTERCEPT_PREAD64
+DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread64, int fd, void *buf,
+                             SIZE_T count, OFF64_T offset);
+# endif
+
 # if ASAN_INTERCEPT_MLOCKX
 // mlock/munlock
-DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, size_t len);
-DECLARE_FUNCTION_AND_WRAPPER(int, munlock, const void *addr, size_t len);
+DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, SIZE_T len);
+DECLARE_FUNCTION_AND_WRAPPER(int, munlock, const void *addr, SIZE_T len);
 DECLARE_FUNCTION_AND_WRAPPER(int, mlockall, int flags);
 DECLARE_FUNCTION_AND_WRAPPER(int, munlockall, void);
 # endif
@@ -25,38 +25,20 @@
 
 namespace __asan {
 
-// Instruments read/write access to a single byte in memory.
-// On error calls __asan_report_error, which aborts the program.
-#define ACCESS_ADDRESS(address, isWrite) do {                               \
-  if (!AddrIsInMem(address) || AddressIsPoisoned(address)) {                \
-    GET_CURRENT_PC_BP_SP;                                                   \
-    __asan_report_error(pc, bp, sp, address, isWrite, /* access_size */ 1); \
-  }                                                                         \
-} while (0)
-
 // We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
 // and ASAN_WRITE_RANGE as macro instead of function so
 // that no extra frames are created, and stack trace contains
 // relevant information only.
-// Instruments read/write access to a memory range.
-// More complex implementation is possible, for now just
-// checking the first and the last byte of a range.
-#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
-  if (size > 0) {                                       \
-    uptr ptr = (uptr)(offset);                          \
-    ACCESS_ADDRESS(ptr, isWrite);                       \
-    ACCESS_ADDRESS(ptr + (size) - 1, isWrite);          \
-  }                                                     \
+// We check all shadow bytes.
+#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do {                  \
+  if (uptr __ptr = __asan_region_is_poisoned((uptr)(offset), size)) {    \
+    GET_CURRENT_PC_BP_SP;                                                \
+    __asan_report_error(pc, bp, sp, __ptr, isWrite, /* access_size */1); \
+  }                                                                      \
 } while (0)
 
-#define ASAN_READ_RANGE(offset, size) do { \
-  ACCESS_MEMORY_RANGE(offset, size, false); \
-} while (0)
-
-#define ASAN_WRITE_RANGE(offset, size) do { \
-  ACCESS_MEMORY_RANGE(offset, size, true); \
-} while (0)
+#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false)
+#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true);
 
 // Behavior of functions like "memcpy" or "strcpy" is undefined
 // if memory intervals overlap. We report error in this case.
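The old macro probed only the first and last byte, so a poisoned hole strictly inside a range went unnoticed; the new one consults the whole shadow via __asan_region_is_poisoned, which returns the first bad address (or 0). A small demonstration of the property it relies on, using today's public sanitizer/asan_interface.h prototypes (an illustration, not part of this commit; build with -fsanitize=address):

    #include <sanitizer/asan_interface.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      char *p = (char *)malloc(24);
      __asan_poison_memory_region(p + 8, 8);          // hole in the middle only
      // Both endpoints are clean, so a first/last-byte probe would pass;
      // the whole-range query still finds the hole:
      void *bad = __asan_region_is_poisoned(p, 24);   // -> p + 8, not 0
      printf("first poisoned byte: %p (expected %p)\n", bad, (void *)(p + 8));
      __asan_unpoison_memory_region(p + 8, 8);
      free(p);
    }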
@@ -69,7 +51,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
   const char *offset1 = (const char*)_offset1; \
   const char *offset2 = (const char*)_offset2; \
   if (RangesOverlap(offset1, length1, offset2, length2)) { \
-    GET_STACK_TRACE_HERE(kStackTraceMax); \
+    GET_STACK_TRACE_FATAL_HERE; \
     ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
                                             offset2, length2, &stack); \
   } \
@@ -96,6 +78,11 @@ static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
 // ---------------------- Wrappers ---------------- {{{1
 using namespace __asan;  // NOLINT
 
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) ASAN_WRITE_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) ASAN_READ_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(func, ...) ENSURE_ASAN_INITED()
+#include "sanitizer_common/sanitizer_common_interceptors.h"
+
 static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
   AsanThread *t = (AsanThread*)arg;
   asanThreadRegistry().SetCurrent(t);
@@ -105,7 +92,7 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
 #if ASAN_INTERCEPT_PTHREAD_CREATE
 INTERCEPTOR(int, pthread_create, void *thread,
     void *attr, void *(*start_routine)(void*), void *arg) {
-  GET_STACK_TRACE_HERE(kStackTraceMax);
+  GET_STACK_TRACE_THREAD;
   u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
   AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
   asanThreadRegistry().RegisterThread(t);
@@ -175,6 +162,25 @@ INTERCEPTOR(void, siglongjmp, void *env, int val) {
 }
 #endif
 
+#if ASAN_INTERCEPT_PRCTL
+#define PR_SET_NAME 15
+INTERCEPTOR(int, prctl, int option,
+            unsigned long arg2, unsigned long arg3,   // NOLINT
+            unsigned long arg4, unsigned long arg5) { // NOLINT
+  int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
+  if (option == PR_SET_NAME) {
+    AsanThread *t = asanThreadRegistry().GetCurrent();
+    if (t) {
+      char buff[17];
+      internal_strncpy(buff, (char*)arg2, 16);
+      buff[16] = 0;
+      t->summary()->set_name(buff);
+    }
+  }
+  return res;
+}
+#endif
+
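The interceptor above captures thread names set via prctl(PR_SET_NAME) so that ASan reports can show them. Typical user code it observes looks like this (names are at most 16 bytes including the terminating NUL):

    #include <sys/prctl.h>
    #include <pthread.h>

    static void *worker(void *) {
      prctl(PR_SET_NAME, (unsigned long)"worker", 0, 0, 0);
      // ... an ASan report fired from this thread can now print "worker" ...
      return nullptr;
    }

    int main() {
      pthread_t t;
      pthread_create(&t, nullptr, worker, nullptr);
      pthread_join(t, nullptr);
    }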
 #if ASAN_INTERCEPT___CXA_THROW
 INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
   CHECK(REAL(__cxa_throw));
@@ -256,8 +262,8 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
     // See http://llvm.org/bugs/show_bug.cgi?id=11763.
     CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
   }
-  ASAN_WRITE_RANGE(from, size);
-  ASAN_READ_RANGE(to, size);
+  ASAN_READ_RANGE(from, size);
+  ASAN_WRITE_RANGE(to, size);
 }
 #if MAC_INTERPOSE_FUNCTIONS
 // Interposing of resolver functions is broken on Mac OS 10.7 and 10.8.
@@ -275,8 +281,8 @@ INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
   }
   ENSURE_ASAN_INITED();
   if (flags()->replace_intrin) {
-    ASAN_WRITE_RANGE(from, size);
-    ASAN_READ_RANGE(to, size);
+    ASAN_READ_RANGE(from, size);
+    ASAN_WRITE_RANGE(to, size);
   }
 #if MAC_INTERPOSE_FUNCTIONS
 // Interposing of resolver functions is broken on Mac OS 10.7 and 10.8.
@@ -621,7 +627,7 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
     void* security, uptr stack_size,
     DWORD (__stdcall *start_routine)(void*), void* arg,
     DWORD flags, void* tid) {
-  GET_STACK_TRACE_HERE(kStackTraceMax);
+  GET_STACK_TRACE_THREAD;
   u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
   AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
   asanThreadRegistry().RegisterThread(t);
@@ -646,6 +652,9 @@ void InitializeAsanInterceptors() {
 #if MAC_INTERPOSE_FUNCTIONS
   return;
 #endif
+
+  SANITIZER_COMMON_INTERCEPTORS_INIT;
+
   // Intercept mem* functions.
   ASAN_INTERCEPT_FUNC(memcmp);
   ASAN_INTERCEPT_FUNC(memmove);
@@ -718,6 +727,9 @@ void InitializeAsanInterceptors() {
 #if ASAN_INTERCEPT_SIGLONGJMP
   ASAN_INTERCEPT_FUNC(siglongjmp);
 #endif
+#if ASAN_INTERCEPT_PRCTL
+  ASAN_INTERCEPT_FUNC(prctl);
+#endif
 
   // Intercept exception handling functions.
 #if ASAN_INTERCEPT___CXA_THROW
@@ -81,9 +81,9 @@
 // If set, values like allocator chunk size, as well as defaults for some flags
 // will be changed towards less memory overhead.
 #ifndef ASAN_LOW_MEMORY
-# ifdef ASAN_ANDROID
+#if SANITIZER_WORDSIZE == 32
 #  define ASAN_LOW_MEMORY 1
-# else
+#else
 #  define ASAN_LOW_MEMORY 0
 # endif
 #endif
@@ -143,6 +143,15 @@ bool PlatformHasDifferentMemcpyAndMemmove();
 # define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
 #endif  // __APPLE__
 
+// Add convenient macro for interface functions that may be represented as
+// weak hooks.
+#define ASAN_MALLOC_HOOK(ptr, size) \
+  if (&__asan_malloc_hook) __asan_malloc_hook(ptr, size)
+#define ASAN_FREE_HOOK(ptr) \
+  if (&__asan_free_hook) __asan_free_hook(ptr)
+#define ASAN_ON_ERROR() \
+  if (&__asan_on_error) __asan_on_error()
+
 extern int asan_inited;
 // Used to avoid infinite recursion in __asan_init().
 extern bool asan_init_is_running;
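Because the hooks are weak symbols, a client program can simply define them and ASan will invoke them through the ASAN_MALLOC_HOOK / ASAN_FREE_HOOK macros above. A minimal sketch (size_t stands in for the runtime's uptr here; note that real hooks must be careful, since fprintf may itself allocate and re-enter the allocator):

    #include <cstdio>
    #include <cstddef>

    extern "C" void __asan_malloc_hook(void *ptr, size_t size) {
      fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);  // tracing only
    }
    extern "C" void __asan_free_hook(void *ptr) {
      fprintf(stderr, "free  %p\n", ptr);
    }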
@@ -120,53 +120,21 @@ void AsanLock::Unlock() {
   pthread_mutex_unlock((pthread_mutex_t*)&opaque_storage_);
 }
 
-#ifdef __arm__
-#define UNWIND_STOP _URC_END_OF_STACK
-#define UNWIND_CONTINUE _URC_NO_REASON
-#else
-#define UNWIND_STOP _URC_NORMAL_STOP
-#define UNWIND_CONTINUE _URC_NO_REASON
-#endif
-
-uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#ifdef __arm__
-  uptr val;
-  _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
-      15 /* r15 = PC */, _UVRSD_UINT32, &val);
-  CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
-  // Clear the Thumb bit.
-  return val & ~(uptr)1;
-#else
-  return _Unwind_GetIP(ctx);
-#endif
-}
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx,
-    void *param) {
-  StackTrace *b = (StackTrace*)param;
-  CHECK(b->size < b->max_size);
-  uptr pc = Unwind_GetIP(ctx);
-  b->trace[b->size++] = pc;
-  if (b->size == b->max_size) return UNWIND_STOP;
-  return UNWIND_CONTINUE;
-}
-
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
-  stack->size = 0;
-  stack->trace[0] = pc;
-  if ((max_s) > 1) {
-    stack->max_size = max_s;
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
 #if defined(__arm__) || \
     defined(__powerpc__) || defined(__powerpc64__) || \
     defined(__sparc__)
-    _Unwind_Backtrace(Unwind_Trace, stack);
-    // Pop off the two ASAN functions from the backtrace.
-    stack->PopStackFrames(2);
-#else
+  fast = false;
+#endif
+  if (!fast)
+    return stack->SlowUnwindStack(pc, max_s);
+  stack->size = 0;
+  stack->trace[0] = pc;
+  if (max_s > 1) {
+    stack->max_size = max_s;
     if (!asan_inited) return;
     if (AsanThread *t = asanThreadRegistry().GetCurrent())
       stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom());
-#endif
   }
 }
|
||||||
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
|
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
|
||||||
}
|
}
|
||||||
|
|
||||||
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
|
void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
|
||||||
|
(void)fast;
|
||||||
stack->size = 0;
|
stack->size = 0;
|
||||||
stack->trace[0] = pc;
|
stack->trace[0] = pc;
|
||||||
if ((max_s) > 1) {
|
if ((max_s) > 1) {
|
||||||
|
|
@ -306,7 +307,7 @@ void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
|
||||||
// alloc_asan_context().
|
// alloc_asan_context().
|
||||||
extern "C"
|
extern "C"
|
||||||
void asan_dispatch_call_block_and_release(void *block) {
|
void asan_dispatch_call_block_and_release(void *block) {
|
||||||
GET_STACK_TRACE_HERE(kStackTraceMax);
|
GET_STACK_TRACE_THREAD;
|
||||||
asan_block_context_t *context = (asan_block_context_t*)block;
|
asan_block_context_t *context = (asan_block_context_t*)block;
|
||||||
if (flags()->verbosity >= 2) {
|
if (flags()->verbosity >= 2) {
|
||||||
Report("asan_dispatch_call_block_and_release(): "
|
Report("asan_dispatch_call_block_and_release(): "
|
||||||
|
|
@ -316,7 +317,7 @@ void asan_dispatch_call_block_and_release(void *block) {
|
||||||
asan_register_worker_thread(context->parent_tid, &stack);
|
asan_register_worker_thread(context->parent_tid, &stack);
|
||||||
// Call the original dispatcher for the block.
|
// Call the original dispatcher for the block.
|
||||||
context->func(context->block);
|
context->func(context->block);
|
||||||
asan_free(context, &stack);
|
asan_free(context, &stack, FROM_MALLOC);
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace __asan
|
} // namespace __asan
|
||||||
|
|
@ -341,7 +342,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
|
||||||
#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
|
#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
|
||||||
INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
|
INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
|
||||||
dispatch_function_t func) { \
|
dispatch_function_t func) { \
|
||||||
GET_STACK_TRACE_HERE(kStackTraceMax); \
|
GET_STACK_TRACE_THREAD; \
|
||||||
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
|
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
|
||||||
if (flags()->verbosity >= 2) { \
|
if (flags()->verbosity >= 2) { \
|
||||||
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
|
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
|
||||||
|
|
@ -359,7 +360,7 @@ INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
|
||||||
INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
|
INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
|
||||||
dispatch_queue_t dq, void *ctxt,
|
dispatch_queue_t dq, void *ctxt,
|
||||||
dispatch_function_t func) {
|
dispatch_function_t func) {
|
||||||
GET_STACK_TRACE_HERE(kStackTraceMax);
|
GET_STACK_TRACE_THREAD;
|
||||||
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
|
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
|
||||||
if (flags()->verbosity >= 2) {
|
if (flags()->verbosity >= 2) {
|
||||||
Report("dispatch_after_f: %p\n", asan_ctxt);
|
Report("dispatch_after_f: %p\n", asan_ctxt);
|
||||||
|
|
@ -372,7 +373,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
|
||||||
INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
|
INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
|
||||||
dispatch_queue_t dq, void *ctxt,
|
dispatch_queue_t dq, void *ctxt,
|
||||||
dispatch_function_t func) {
|
dispatch_function_t func) {
|
||||||
GET_STACK_TRACE_HERE(kStackTraceMax);
|
GET_STACK_TRACE_THREAD;
|
||||||
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
|
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
|
||||||
if (flags()->verbosity >= 2) {
|
if (flags()->verbosity >= 2) {
|
||||||
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
|
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
|
||||||
|
|
@ -407,7 +408,7 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
|
||||||
void (^asan_block)(void); \
|
void (^asan_block)(void); \
|
||||||
int parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); \
|
int parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); \
|
||||||
asan_block = ^(void) { \
|
asan_block = ^(void) { \
|
||||||
GET_STACK_TRACE_HERE(kStackTraceMax); \
|
GET_STACK_TRACE_THREAD; \
|
||||||
asan_register_worker_thread(parent_tid, &stack); \
|
asan_register_worker_thread(parent_tid, &stack); \
|
||||||
work(); \
|
work(); \
|
||||||
}
|
}
|
||||||
|
|
@ -457,15 +458,15 @@ void *wrap_workitem_func(void *arg) {
|
||||||
asan_block_context_t *ctxt = (asan_block_context_t*)arg;
|
asan_block_context_t *ctxt = (asan_block_context_t*)arg;
|
||||||
worker_t fn = (worker_t)(ctxt->func);
|
worker_t fn = (worker_t)(ctxt->func);
|
||||||
void *result = fn(ctxt->block);
|
void *result = fn(ctxt->block);
|
||||||
GET_STACK_TRACE_HERE(kStackTraceMax);
|
GET_STACK_TRACE_THREAD;
|
||||||
asan_free(arg, &stack);
|
asan_free(arg, &stack, FROM_MALLOC);
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
|
INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
|
||||||
void *(*workitem_func)(void *), void * workitem_arg,
|
void *(*workitem_func)(void *), void * workitem_arg,
|
||||||
pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
|
pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
|
||||||
GET_STACK_TRACE_HERE(kStackTraceMax);
|
GET_STACK_TRACE_THREAD;
|
||||||
asan_block_context_t *asan_ctxt =
|
asan_block_context_t *asan_ctxt =
|
||||||
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
|
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
|
||||||
asan_ctxt->block = workitem_arg;
|
asan_ctxt->block = workitem_arg;
|
||||||
|
|
|
||||||
|
|
@@ -17,6 +17,8 @@
 #include "asan_interceptors.h"
 #include "asan_internal.h"
 #include "asan_stack.h"
+#include "asan_thread_registry.h"
+#include "sanitizer/asan_interface.h"
 
 #if ASAN_ANDROID
 DECLARE_REAL_AND_INTERCEPTOR(void*, malloc, uptr size)
@@ -57,17 +59,17 @@ void ReplaceSystemMalloc() {
 using namespace __asan;  // NOLINT
 
 INTERCEPTOR(void, free, void *ptr) {
-  GET_STACK_TRACE_HERE_FOR_FREE(ptr);
-  asan_free(ptr, &stack);
+  GET_STACK_TRACE_FREE;
+  asan_free(ptr, &stack, FROM_MALLOC);
 }
 
 INTERCEPTOR(void, cfree, void *ptr) {
-  GET_STACK_TRACE_HERE_FOR_FREE(ptr);
-  asan_free(ptr, &stack);
+  GET_STACK_TRACE_FREE;
+  asan_free(ptr, &stack, FROM_MALLOC);
 }
 
 INTERCEPTOR(void*, malloc, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc(size, &stack);
 }
 
@@ -83,25 +85,25 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
     CHECK(allocated < kCallocPoolSize);
     return mem;
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_calloc(nmemb, size, &stack);
 }
 
 INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_realloc(ptr, size, &stack);
 }
 
 INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
-  return asan_memalign(boundary, size, &stack);
+  GET_STACK_TRACE_MALLOC;
+  return asan_memalign(boundary, size, &stack, FROM_MALLOC);
 }
 
 INTERCEPTOR(void*, __libc_memalign, uptr align, uptr s)
   ALIAS("memalign");
 
 INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc_usable_size(ptr, &stack);
 }
 
@@ -124,19 +126,23 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
 }
 
 INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   // Printf("posix_memalign: %zx %zu\n", alignment, size);
   return asan_posix_memalign(memptr, alignment, size, &stack);
 }
 
 INTERCEPTOR(void*, valloc, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_valloc(size, &stack);
 }
 
 INTERCEPTOR(void*, pvalloc, uptr size) {
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_pvalloc(size, &stack);
 }
 
+INTERCEPTOR(void, malloc_stats, void) {
+  __asan_print_accumulated_stats();
+}
+
 #endif  // __linux__
@@ -90,8 +90,8 @@ INTERCEPTOR(void, free, void *ptr) {
 #endif
   } else {
     if (!asan_mz_size(ptr)) ptr = get_saved_cfallocator_ref(ptr);
-    GET_STACK_TRACE_HERE_FOR_FREE(ptr);
-    asan_free(ptr, &stack);
+    GET_STACK_TRACE_FREE;
+    asan_free(ptr, &stack, FROM_MALLOC);
   }
 }
@@ -128,7 +128,7 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
     CHECK(system_malloc_zone);
     return malloc_zone_malloc(system_malloc_zone, size);
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc(size, &stack);
 }
@@ -137,7 +137,7 @@ void *cf_malloc(CFIndex size, CFOptionFlags hint, void *info) {
     CHECK(system_malloc_zone);
     return malloc_zone_malloc(system_malloc_zone, size);
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_malloc(size, &stack);
 }
@@ -153,7 +153,7 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
     CHECK(allocated < kCallocPoolSize);
     return mem;
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
+  GET_STACK_TRACE_MALLOC;
   return asan_calloc(nmemb, size, &stack);
 }
@@ -162,8 +162,8 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {
     CHECK(system_malloc_zone);
     return malloc_zone_valloc(system_malloc_zone, size);
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
-  return asan_memalign(GetPageSizeCached(), size, &stack);
+  GET_STACK_TRACE_MALLOC;
+  return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
 }
 
 #define GET_ZONE_FOR_PTR(ptr) \
@@ -173,8 +173,8 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) {
 void ALWAYS_INLINE free_common(void *context, void *ptr) {
   if (!ptr) return;
   if (asan_mz_size(ptr)) {
-    GET_STACK_TRACE_HERE_FOR_FREE(ptr);
-    asan_free(ptr, &stack);
+    GET_STACK_TRACE_FREE;
+    asan_free(ptr, &stack, FROM_MALLOC);
   } else {
     // If the pointer does not belong to any of the zones, use one of the
     // fallback methods to free memory.
@@ -188,9 +188,9 @@ void ALWAYS_INLINE free_common(void *context, void *ptr) {
     // If the memory chunk pointer was moved to store additional
     // CFAllocatorRef, fix it back.
     ptr = get_saved_cfallocator_ref(ptr);
-    GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+    GET_STACK_TRACE_FREE;
     if (!flags()->mac_ignore_invalid_free) {
-      asan_free(ptr, &stack);
+      asan_free(ptr, &stack, FROM_MALLOC);
     } else {
       GET_ZONE_FOR_PTR(ptr);
       WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
@@ -211,17 +211,17 @@ void cf_free(void *ptr, void *info) {
 
 void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
   if (!ptr) {
-    GET_STACK_TRACE_HERE_FOR_MALLOC;
+    GET_STACK_TRACE_MALLOC;
     return asan_malloc(size, &stack);
   } else {
     if (asan_mz_size(ptr)) {
-      GET_STACK_TRACE_HERE_FOR_MALLOC;
+      GET_STACK_TRACE_MALLOC;
       return asan_realloc(ptr, size, &stack);
     } else {
       // We can't recover from reallocating an unknown address, because
       // this would require reading at most |size| bytes from
       // potentially inaccessible memory.
-      GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+      GET_STACK_TRACE_FREE;
       GET_ZONE_FOR_PTR(ptr);
       ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
     }
@@ -230,17 +230,17 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
 
 void *cf_realloc(void *ptr, CFIndex size, CFOptionFlags hint, void *info) {
   if (!ptr) {
-    GET_STACK_TRACE_HERE_FOR_MALLOC;
+    GET_STACK_TRACE_MALLOC;
     return asan_malloc(size, &stack);
   } else {
    if (asan_mz_size(ptr)) {
-      GET_STACK_TRACE_HERE_FOR_MALLOC;
+      GET_STACK_TRACE_MALLOC;
      return asan_realloc(ptr, size, &stack);
    } else {
       // We can't recover from reallocating an unknown address, because
       // this would require reading at most |size| bytes from
       // potentially inaccessible memory.
-      GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+      GET_STACK_TRACE_FREE;
       GET_ZONE_FOR_PTR(ptr);
       ReportMacCfReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
    }
@@ -259,8 +259,8 @@ void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
     CHECK(system_malloc_zone);
     return malloc_zone_memalign(system_malloc_zone, align, size);
   }
-  GET_STACK_TRACE_HERE_FOR_MALLOC;
-  return asan_memalign(align, size, &stack);
+  GET_STACK_TRACE_MALLOC;
+  return asan_memalign(align, size, &stack, FROM_MALLOC);
 }
 
 // This function is currently unused, and we build with -Werror.
|
||||||
|
|
||||||
extern "C" {
|
extern "C" {
|
||||||
void free(void *ptr) {
|
void free(void *ptr) {
|
||||||
GET_STACK_TRACE_HERE_FOR_FREE(ptr);
|
GET_STACK_TRACE_FREE;
|
||||||
return asan_free(ptr, &stack);
|
return asan_free(ptr, &stack, FROM_MALLOC);
|
||||||
}
|
}
|
||||||
|
|
||||||
void _free_dbg(void* ptr, int) {
|
void _free_dbg(void* ptr, int) {
|
||||||
|
|
@ -42,7 +42,7 @@ void cfree(void *ptr) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void *malloc(size_t size) {
|
void *malloc(size_t size) {
|
||||||
GET_STACK_TRACE_HERE_FOR_MALLOC;
|
GET_STACK_TRACE_MALLOC;
|
||||||
return asan_malloc(size, &stack);
|
return asan_malloc(size, &stack);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -51,7 +51,7 @@ void* _malloc_dbg(size_t size, int , const char*, int) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void *calloc(size_t nmemb, size_t size) {
|
void *calloc(size_t nmemb, size_t size) {
|
||||||
GET_STACK_TRACE_HERE_FOR_MALLOC;
|
GET_STACK_TRACE_MALLOC;
|
||||||
return asan_calloc(nmemb, size, &stack);
|
return asan_calloc(nmemb, size, &stack);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -64,7 +64,7 @@ void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void *realloc(void *ptr, size_t size) {
|
void *realloc(void *ptr, size_t size) {
|
||||||
GET_STACK_TRACE_HERE_FOR_MALLOC;
|
GET_STACK_TRACE_MALLOC;
|
||||||
return asan_realloc(ptr, size, &stack);
|
return asan_realloc(ptr, size, &stack);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -83,7 +83,7 @@ void* _recalloc(void* p, size_t n, size_t elem_size) {
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t _msize(void *ptr) {
|
size_t _msize(void *ptr) {
|
||||||
GET_STACK_TRACE_HERE_FOR_MALLOC;
|
GET_STACK_TRACE_MALLOC;
|
||||||
return asan_malloc_usable_size(ptr, &stack);
|
return asan_malloc_usable_size(ptr, &stack);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -18,8 +18,8 @@
 // http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
 
 #if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1
-extern __attribute__((visibility("default"))) uptr __asan_mapping_scale;
-extern __attribute__((visibility("default"))) uptr __asan_mapping_offset;
+extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale;
+extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset;
 # define SHADOW_SCALE (__asan_mapping_scale)
 # define SHADOW_OFFSET (__asan_mapping_offset)
 #else
@@ -33,32 +33,34 @@ namespace std {
 struct nothrow_t {};
 }  // namespace std
 
-#define OPERATOR_NEW_BODY \
-  GET_STACK_TRACE_HERE_FOR_MALLOC;\
-  return asan_memalign(0, size, &stack);
+#define OPERATOR_NEW_BODY(type) \
+  GET_STACK_TRACE_MALLOC;\
+  return asan_memalign(0, size, &stack, type);
 
 INTERCEPTOR_ATTRIBUTE
-void *operator new(size_t size) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
 INTERCEPTOR_ATTRIBUTE
-void *operator new[](size_t size) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR); }
 INTERCEPTOR_ATTRIBUTE
-void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FROM_NEW); }
 INTERCEPTOR_ATTRIBUTE
-void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
 
-#define OPERATOR_DELETE_BODY \
-  GET_STACK_TRACE_HERE_FOR_FREE(ptr);\
-  asan_free(ptr, &stack);
+#define OPERATOR_DELETE_BODY(type) \
+  GET_STACK_TRACE_FREE;\
+  asan_free(ptr, &stack, type);
 
 INTERCEPTOR_ATTRIBUTE
-void operator delete(void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); }
 INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr) { OPERATOR_DELETE_BODY; }
+void operator delete[](void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW_BR); }
 INTERCEPTOR_ATTRIBUTE
 void operator delete(void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY; }
+{ OPERATOR_DELETE_BODY(FROM_NEW); }
 INTERCEPTOR_ATTRIBUTE
 void operator delete[](void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY; }
+{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
 
 #endif
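The FROM_NEW / FROM_NEW_BR tags threaded through the operators above are what lets ASan (with alloc_dealloc_mismatch enabled) flag mismatched allocation/deallocation pairs, for example:

    int main() {
      int *a = new int[10];
      delete a;        // should be delete[] a; ASan reports the mismatch
      return 0;
    }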
@ -14,10 +14,12 @@
|
||||||
#include "asan_internal.h"
|
#include "asan_internal.h"
|
||||||
#include "asan_mapping.h"
|
#include "asan_mapping.h"
|
||||||
#include "sanitizer/asan_interface.h"
|
#include "sanitizer/asan_interface.h"
|
||||||
|
#include "sanitizer_common/sanitizer_libc.h"
|
||||||
|
|
||||||
namespace __asan {
|
namespace __asan {
|
||||||
|
|
||||||
void PoisonShadow(uptr addr, uptr size, u8 value) {
|
void PoisonShadow(uptr addr, uptr size, u8 value) {
|
||||||
|
if (!flags()->poison_heap) return;
|
||||||
CHECK(AddrIsAlignedByGranularity(addr));
|
CHECK(AddrIsAlignedByGranularity(addr));
|
||||||
CHECK(AddrIsAlignedByGranularity(addr + size));
|
CHECK(AddrIsAlignedByGranularity(addr + size));
|
||||||
uptr shadow_beg = MemToShadow(addr);
|
uptr shadow_beg = MemToShadow(addr);
|
||||||
|
|
@ -30,6 +32,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
|
||||||
uptr size,
|
uptr size,
|
||||||
uptr redzone_size,
|
uptr redzone_size,
|
||||||
u8 value) {
|
u8 value) {
|
||||||
|
if (!flags()->poison_heap) return;
|
||||||
CHECK(AddrIsAlignedByGranularity(addr));
|
CHECK(AddrIsAlignedByGranularity(addr));
|
||||||
u8 *shadow = (u8*)MemToShadow(addr);
|
u8 *shadow = (u8*)MemToShadow(addr);
|
||||||
for (uptr i = 0; i < redzone_size;
|
for (uptr i = 0; i < redzone_size;
|
||||||
|
|
@@ -150,6 +153,33 @@ bool __asan_address_is_poisoned(void const volatile *addr) {
   return __asan::AddressIsPoisoned((uptr)addr);
 }

+uptr __asan_region_is_poisoned(uptr beg, uptr size) {
+  if (!size) return 0;
+  uptr end = beg + size;
+  if (!AddrIsInMem(beg)) return beg;
+  if (!AddrIsInMem(end)) return end;
+  uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
+  uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
+  uptr shadow_beg = MemToShadow(aligned_b);
+  uptr shadow_end = MemToShadow(aligned_e);
+  // First check the first and the last application bytes,
+  // then check the SHADOW_GRANULARITY-aligned region by calling
+  // mem_is_zero on the corresponding shadow.
+  if (!__asan::AddressIsPoisoned(beg) &&
+      !__asan::AddressIsPoisoned(end - 1) &&
+      (shadow_end <= shadow_beg ||
+       __sanitizer::mem_is_zero((const char *)shadow_beg,
+                                shadow_end - shadow_beg)))
+    return 0;
+  // The fast check failed, so we have a poisoned byte somewhere.
+  // Find it slowly.
+  for (; beg < end; beg++)
+    if (__asan::AddressIsPoisoned(beg))
+      return beg;
+  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
+  return 0;
+}
+
 // This is a simplified version of __asan_(un)poison_memory_region, which
 // assumes that left border of region to be poisoned is properly aligned.
 static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
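
The new __asan_region_is_poisoned above takes a fast path (two single-byte checks plus mem_is_zero on the shadow) before falling back to a byte-by-byte walk. A minimal usage sketch, assuming a program built with -fsanitize=address against this runtime; the buffer and harness are illustrative, not part of the commit (uptr is the integer pointer type exported by the interface headers):

#include <sanitizer/asan_interface.h>
#include <assert.h>

int main() {
  char buf[64];
  // Manually poison the middle 16 bytes.
  __asan_poison_memory_region(buf + 16, 16);
  // Per the declaration added in this commit: returns 0 if the whole
  // region is addressable, else the address of the first poisoned byte.
  assert(__asan_region_is_poisoned((uptr)buf, 16) == 0);
  assert(__asan_region_is_poisoned((uptr)buf, 64) == (uptr)(buf + 16));
  __asan_unpoison_memory_region(buf + 16, 16);
  return 0;
}
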
@@ -166,7 +196,7 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
       // If possible, mark all the bytes mapping to last shadow byte as
       // unaddressable.
       if (end_value > 0 && end_value <= end_offset)
-        *shadow_end = kAsanStackUseAfterScopeMagic;
+        *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
     } else {
       // If necessary, mark few first bytes mapping to last shadow byte
       // as addressable
@@ -16,6 +16,9 @@
 #include "asan_stack.h"
 #include "asan_thread.h"
 #include "asan_thread_registry.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"

 namespace __asan {

@@ -38,14 +41,79 @@ void AppendToErrorMessageBuffer(const char *buffer) {
   }
 }

+// ---------------------- Decorator ------------------------------ {{{1
+bool PrintsToTtyCached() {
+  static int cached = 0;
+  static bool prints_to_tty;
+  if (!cached) {  // Ok wrt threads since we are printing only from one thread.
+    prints_to_tty = PrintsToTty();
+    cached = 1;
+  }
+  return prints_to_tty;
+}
+class Decorator: private __sanitizer::AnsiColorDecorator {
+ public:
+  Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+  const char *Warning()    { return Red(); }
+  const char *EndWarning() { return Default(); }
+  const char *Access()     { return Blue(); }
+  const char *EndAccess()  { return Default(); }
+  const char *Location()   { return Green(); }
+  const char *EndLocation() { return Default(); }
+  const char *Allocation() { return Magenta(); }
+  const char *EndAllocation() { return Default(); }
+
+  const char *ShadowByte(u8 byte) {
+    switch (byte) {
+      case kAsanHeapLeftRedzoneMagic:
+      case kAsanHeapRightRedzoneMagic:
+        return Red();
+      case kAsanHeapFreeMagic:
+        return Magenta();
+      case kAsanStackLeftRedzoneMagic:
+      case kAsanStackMidRedzoneMagic:
+      case kAsanStackRightRedzoneMagic:
+      case kAsanStackPartialRedzoneMagic:
+        return Red();
+      case kAsanStackAfterReturnMagic:
+        return Magenta();
+      case kAsanInitializationOrderMagic:
+        return Cyan();
+      case kAsanUserPoisonedMemoryMagic:
+        return Blue();
+      case kAsanStackUseAfterScopeMagic:
+        return Magenta();
+      case kAsanGlobalRedzoneMagic:
+        return Red();
+      case kAsanInternalHeapMagic:
+        return Yellow();
+      default:
+        return Default();
+    }
+  }
+  const char *EndShadowByte() { return Default(); }
+};

 // ---------------------- Helper functions ----------------------- {{{1

-static void PrintBytes(const char *before, uptr *a) {
-  u8 *bytes = (u8*)a;
-  uptr byte_num = (SANITIZER_WORDSIZE) / 8;
-  Printf("%s%p:", before, (void*)a);
-  for (uptr i = 0; i < byte_num; i++) {
-    Printf(" %x%x", bytes[i] >> 4, bytes[i] & 15);
+static void PrintShadowByte(const char *before, u8 byte,
+                            const char *after = "\n") {
+  Decorator d;
+  Printf("%s%s%x%x%s%s", before,
+         d.ShadowByte(byte), byte >> 4, byte & 15, d.EndShadowByte(), after);
+}
+
+static void PrintShadowBytes(const char *before, u8 *bytes,
+                             u8 *guilty, uptr n) {
+  Decorator d;
+  if (before)
+    Printf("%s%p:", before, bytes);
+  for (uptr i = 0; i < n; i++) {
+    u8 *p = bytes + i;
+    const char *before = p == guilty ? "[" :
+        p - 1 == guilty ? "" : " ";
+    const char *after = p == guilty ? "]" : "";
+    PrintShadowByte(before, *p, after);
   }
   Printf("\n");
 }
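
The Decorator above gets its Red()/Default() and friends from AnsiColorDecorator in sanitizer_common, which returns ANSI escape sequences when colorized output is enabled and empty strings otherwise. A self-contained sketch of the idea (an illustrative stand-in, not the sanitizer_common implementation):

#include <stdio.h>
#include <unistd.h>  // isatty, fileno

// Illustrative stand-in for __sanitizer::AnsiColorDecorator.
class AnsiColor {
 public:
  explicit AnsiColor(bool use_color) : on_(use_color) {}
  const char *Red()     { return on_ ? "\033[1m\033[31m" : ""; }
  const char *Green()   { return on_ ? "\033[1m\033[32m" : ""; }
  const char *Default() { return on_ ? "\033[0m" : ""; }
 private:
  bool on_;
};

int main() {
  // Color only when stderr is a terminal, mirroring PrintsToTtyCached().
  AnsiColor c(isatty(fileno(stderr)));
  fprintf(stderr, "%sERROR: something bad%s\n", c.Red(), c.Default());
}
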
@@ -54,15 +122,35 @@ static void PrintShadowMemoryForAddress(uptr addr) {
   if (!AddrIsInMem(addr))
     return;
   uptr shadow_addr = MemToShadow(addr);
-  Printf("Shadow byte and word:\n");
-  Printf("  %p: %x\n", (void*)shadow_addr, *(unsigned char*)shadow_addr);
-  uptr aligned_shadow = shadow_addr & ~(kWordSize - 1);
-  PrintBytes("  ", (uptr*)(aligned_shadow));
-  Printf("More shadow bytes:\n");
-  for (int i = -4; i <= 4; i++) {
+  const uptr n_bytes_per_row = 16;
+  uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
+  Printf("Shadow bytes around the buggy address:\n");
+  for (int i = -5; i <= 5; i++) {
     const char *prefix = (i == 0) ? "=>" : "  ";
-    PrintBytes(prefix, (uptr*)(aligned_shadow + i * kWordSize));
+    PrintShadowBytes(prefix,
+                     (u8*)(aligned_shadow + i * n_bytes_per_row),
+                     (u8*)shadow_addr, n_bytes_per_row);
   }
+  Printf("Shadow byte legend (one shadow byte represents %d "
+         "application bytes):\n", (int)SHADOW_GRANULARITY);
+  PrintShadowByte("  Addressable:           ", 0);
+  Printf("  Partially addressable: ");
+  for (uptr i = 1; i < SHADOW_GRANULARITY; i++)
+    PrintShadowByte("", i, " ");
+  Printf("\n");
+  PrintShadowByte("  Heap left redzone:     ", kAsanHeapLeftRedzoneMagic);
+  PrintShadowByte("  Heap right redzone:    ", kAsanHeapRightRedzoneMagic);
+  PrintShadowByte("  Freed Heap region:     ", kAsanHeapFreeMagic);
+  PrintShadowByte("  Stack left redzone:    ", kAsanStackLeftRedzoneMagic);
+  PrintShadowByte("  Stack mid redzone:     ", kAsanStackMidRedzoneMagic);
+  PrintShadowByte("  Stack right redzone:   ", kAsanStackRightRedzoneMagic);
+  PrintShadowByte("  Stack partial redzone: ", kAsanStackPartialRedzoneMagic);
+  PrintShadowByte("  Stack after return:    ", kAsanStackAfterReturnMagic);
+  PrintShadowByte("  Stack use after scope: ", kAsanStackUseAfterScopeMagic);
+  PrintShadowByte("  Global redzone:        ", kAsanGlobalRedzoneMagic);
+  PrintShadowByte("  Global init order:     ", kAsanInitializationOrderMagic);
+  PrintShadowByte("  Poisoned by user:      ", kAsanUserPoisonedMemoryMagic);
+  PrintShadowByte("  ASan internal:         ", kAsanInternalHeapMagic);
 }

 static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
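
The legend reflects the standard shadow encoding: each shadow byte covers SHADOW_GRANULARITY (8) application bytes; 0 means all 8 are addressable, k in 1..7 means only the first k are, and the magic values mark redzones and freed memory. A standalone restatement of the mapping, assuming the usual 1:8 scale (the offset shown is the x86-64 default; other targets differ):

#include <stdint.h>

static const uintptr_t kShadowScale  = 3;           // one shadow byte per 8 bytes
static const uintptr_t kShadowOffset = 0x7fff8000;  // x86-64 default (assumption)

// Address of the shadow byte that describes `addr`.
uintptr_t MemToShadowSketch(uintptr_t addr) {
  return (addr >> kShadowScale) + kShadowOffset;
}

// Decode one shadow byte the way the legend above presents it.
const char *DecodeShadowByte(int8_t sb) {
  if (sb == 0) return "all 8 bytes addressable";
  if (sb > 0 && sb < 8) return "only the first sb bytes addressable";
  return "redzone/freed/poisoned (magic value)";
}
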
@@ -98,6 +186,8 @@ static void PrintGlobalNameIfASCII(const __asan_global &g) {
 bool DescribeAddressRelativeToGlobal(uptr addr, const __asan_global &g) {
   if (addr < g.beg - kGlobalAndStackRedzone) return false;
   if (addr >= g.beg + g.size_with_redzone) return false;
+  Decorator d;
+  Printf("%s", d.Location());
   Printf("%p is located ", (void*)addr);
   if (addr < g.beg) {
     Printf("%zd bytes to the left", g.beg - addr);
@@ -108,6 +198,7 @@ bool DescribeAddressRelativeToGlobal(uptr addr, const __asan_global &g) {
   }
   Printf(" of global variable '%s' (0x%zx) of size %zu\n",
          g.name, g.beg, g.size);
+  Printf("%s", d.EndLocation());
   PrintGlobalNameIfASCII(g);
   return true;
 }
@@ -151,9 +242,12 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
   internal_strncat(buf, frame_descr,
                    Min(kBufSize,
                        static_cast<sptr>(name_end - frame_descr)));
+  Decorator d;
+  Printf("%s", d.Location());
   Printf("Address %p is located at offset %zu "
          "in frame <%s> of T%d's stack:\n",
-         (void*)addr, offset, buf, t->tid());
+         (void*)addr, offset, Demangle(buf), t->tid());
+  Printf("%s", d.EndLocation());
   // Report the number of stack objects.
   char *p;
   uptr n_objects = internal_simple_strtoll(name_end, &p, 10);
@@ -187,6 +281,8 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
 static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
                                       uptr access_size) {
   uptr offset;
+  Decorator d;
+  Printf("%s", d.Location());
   Printf("%p is located ", (void*)addr);
   if (chunk.AddrIsInside(addr, access_size, &offset)) {
     Printf("%zu bytes inside of", offset);
@@ -199,6 +295,26 @@ static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
   }
   Printf(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
          (void*)(chunk.Beg()), (void*)(chunk.End()));
+  Printf("%s", d.EndLocation());
+}
+
+// Return " (thread_name) " or an empty string if the name is empty.
+const char *ThreadNameWithParenthesis(AsanThreadSummary *t, char buff[],
+                                      uptr buff_len) {
+  const char *name = t->name();
+  if (*name == 0) return "";
+  buff[0] = 0;
+  internal_strncat(buff, " (", 3);
+  internal_strncat(buff, name, buff_len - 4);
+  internal_strncat(buff, ")", 2);
+  return buff;
+}
+
+const char *ThreadNameWithParenthesis(u32 tid, char buff[],
+                                      uptr buff_len) {
+  if (tid == kInvalidTid) return "";
+  AsanThreadSummary *t = asanThreadRegistry().FindByTid(tid);
+  return ThreadNameWithParenthesis(t, buff, buff_len);
 }

 void DescribeHeapAddress(uptr addr, uptr access_size) {

@@ -212,20 +328,31 @@ void DescribeHeapAddress(uptr addr, uptr access_size) {
   chunk.GetAllocStack(&alloc_stack);
   AsanThread *t = asanThreadRegistry().GetCurrent();
   CHECK(t);
+  char tname[128];
+  Decorator d;
   if (chunk.FreeTid() != kInvalidTid) {
     AsanThreadSummary *free_thread =
         asanThreadRegistry().FindByTid(chunk.FreeTid());
-    Printf("freed by thread T%d here:\n", free_thread->tid());
+    Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
+           free_thread->tid(),
+           ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
+           d.EndAllocation());
     StackTrace free_stack;
     chunk.GetFreeStack(&free_stack);
     PrintStack(&free_stack);
-    Printf("previously allocated by thread T%d here:\n", alloc_thread->tid());
+    Printf("%spreviously allocated by thread T%d%s here:%s\n",
+           d.Allocation(), alloc_thread->tid(),
+           ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+           d.EndAllocation());
     PrintStack(&alloc_stack);
     DescribeThread(t->summary());
     DescribeThread(free_thread);
     DescribeThread(alloc_thread);
   } else {
-    Printf("allocated by thread T%d here:\n", alloc_thread->tid());
+    Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
+           alloc_thread->tid(),
+           ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+           d.EndAllocation());
     PrintStack(&alloc_stack);
     DescribeThread(t->summary());
     DescribeThread(alloc_thread);
@@ -254,8 +381,13 @@ void DescribeThread(AsanThreadSummary *summary) {
     return;
   }
   summary->set_announced(true);
-  Printf("Thread T%d created by T%d here:\n",
-         summary->tid(), summary->parent_tid());
+  char tname[128];
+  Printf("Thread T%d%s", summary->tid(),
+         ThreadNameWithParenthesis(summary->tid(), tname, sizeof(tname)));
+  Printf(" created by T%d%s here:\n",
+         summary->parent_tid(),
+         ThreadNameWithParenthesis(summary->parent_tid(),
+                                   tname, sizeof(tname)));
   PrintStack(summary->stack());
   // Recursively describe parent thread if needed.
   if (flags()->print_full_thread_history) {
@@ -291,7 +423,7 @@ class ScopedInErrorReport {
       // Die() to bypass any additional checks.
       Exit(flags()->exitcode);
     }
-    __asan_on_error();
+    ASAN_ON_ERROR();
     reporting_thread_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
     Printf("===================================================="
            "=============\n");
@@ -322,44 +454,79 @@ class ScopedInErrorReport {

 void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) {
   ScopedInErrorReport in_report;
+  Decorator d;
+  Printf("%s", d.Warning());
   Report("ERROR: AddressSanitizer: SEGV on unknown address %p"
          " (pc %p sp %p bp %p T%d)\n",
          (void*)addr, (void*)pc, (void*)sp, (void*)bp,
          asanThreadRegistry().GetCurrentTidOrInvalid());
+  Printf("%s", d.EndWarning());
   Printf("AddressSanitizer can not provide additional info.\n");
-  GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp);
+  GET_STACK_TRACE_FATAL(pc, bp);
   PrintStack(&stack);
 }

 void ReportDoubleFree(uptr addr, StackTrace *stack) {
   ScopedInErrorReport in_report;
+  Decorator d;
+  Printf("%s", d.Warning());
   Report("ERROR: AddressSanitizer: attempting double-free on %p:\n", addr);
+  Printf("%s", d.EndWarning());
   PrintStack(stack);
   DescribeHeapAddress(addr, 1);
 }

 void ReportFreeNotMalloced(uptr addr, StackTrace *stack) {
   ScopedInErrorReport in_report;
+  Decorator d;
+  Printf("%s", d.Warning());
   Report("ERROR: AddressSanitizer: attempting free on address "
          "which was not malloc()-ed: %p\n", addr);
+  Printf("%s", d.EndWarning());
   PrintStack(stack);
   DescribeHeapAddress(addr, 1);
 }

+void ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
+                             AllocType alloc_type,
+                             AllocType dealloc_type) {
+  static const char *alloc_names[] =
+    {"INVALID", "malloc", "operator new", "operator new []"};
+  static const char *dealloc_names[] =
+    {"INVALID", "free", "operator delete", "operator delete []"};
+  CHECK_NE(alloc_type, dealloc_type);
+  ScopedInErrorReport in_report;
+  Decorator d;
+  Printf("%s", d.Warning());
+  Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
+         alloc_names[alloc_type], dealloc_names[dealloc_type], addr);
+  Printf("%s", d.EndWarning());
+  PrintStack(stack);
+  DescribeHeapAddress(addr, 1);
+  Report("HINT: if you don't care about these warnings you may set "
+         "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
+}
+
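The new ReportAllocTypeMismatch above fires when memory obtained from one allocation family is released through another. A minimal reproducer (illustrative) that trips the check when the alloc_dealloc_mismatch flag, enabled by default later in this commit, is on:

// Build with -fsanitize=address. Expected report:
// "alloc-dealloc-mismatch (operator new [] vs free)".
#include <stdlib.h>

int main() {
  int *p = new int[10];
  free(p);  // Mismatched deallocator; should have been: delete[] p;
  return 0;
}
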
 void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) {
   ScopedInErrorReport in_report;
+  Decorator d;
+  Printf("%s", d.Warning());
   Report("ERROR: AddressSanitizer: attempting to call "
          "malloc_usable_size() for pointer which is "
          "not owned: %p\n", addr);
+  Printf("%s", d.EndWarning());
   PrintStack(stack);
   DescribeHeapAddress(addr, 1);
 }

 void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
   ScopedInErrorReport in_report;
+  Decorator d;
+  Printf("%s", d.Warning());
   Report("ERROR: AddressSanitizer: attempting to call "
          "__asan_get_allocated_size() for pointer which is "
          "not owned: %p\n", addr);
+  Printf("%s", d.EndWarning());
   PrintStack(stack);
   DescribeHeapAddress(addr, 1);
 }

@@ -368,9 +535,12 @@ void ReportStringFunctionMemoryRangesOverlap(
     const char *function, const char *offset1, uptr length1,
     const char *offset2, uptr length2, StackTrace *stack) {
   ScopedInErrorReport in_report;
+  Decorator d;
+  Printf("%s", d.Warning());
   Report("ERROR: AddressSanitizer: %s-param-overlap: "
          "memory ranges [%p,%p) and [%p, %p) overlap\n", \
          function, offset1, offset1 + length1, offset2, offset2 + length2);
+  Printf("%s", d.EndWarning());
   PrintStack(stack);
   DescribeAddress((uptr)offset1, length1);
   DescribeAddress((uptr)offset2, length2);
@@ -463,17 +633,23 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp,
         break;
     }
   }
+  Decorator d;
+  Printf("%s", d.Warning());
   Report("ERROR: AddressSanitizer: %s on address "
          "%p at pc 0x%zx bp 0x%zx sp 0x%zx\n",
          bug_descr, (void*)addr, pc, bp, sp);
+  Printf("%s", d.EndWarning());

   u32 curr_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
-  Printf("%s of size %zu at %p thread T%d\n",
-         access_size ? (is_write ? "WRITE" : "READ") : "ACCESS",
-         access_size, (void*)addr, curr_tid);
+  char tname[128];
+  Printf("%s%s of size %zu at %p thread T%d%s%s\n",
+         d.Access(),
+         access_size ? (is_write ? "WRITE" : "READ") : "ACCESS",
+         access_size, (void*)addr, curr_tid,
+         ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)),
+         d.EndAccess());

-  GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp);
+  GET_STACK_TRACE_FATAL(pc, bp);
   PrintStack(&stack);

   DescribeAddress(addr, access_size);

@@ -491,7 +667,13 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
   }
 }

+void __asan_describe_address(uptr addr) {
+  DescribeAddress(addr, 1);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
 // Provide default implementation of __asan_on_error that does nothing
 // and may be overridden by user.
 SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
 void __asan_on_error() {}
+#endif

@@ -10,6 +10,7 @@
 // ASan-private header for error reporting functions.
 //===----------------------------------------------------------------------===//

+#include "asan_allocator.h"
 #include "asan_internal.h"
 #include "asan_thread.h"
 #include "sanitizer/asan_interface.h"
@@ -32,6 +33,9 @@ void DescribeThread(AsanThreadSummary *summary);
 void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr);
 void NORETURN ReportDoubleFree(uptr addr, StackTrace *stack);
 void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *stack);
+void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *stack,
+                                      AllocType alloc_type,
+                                      AllocType dealloc_type);
 void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,
                                              StackTrace *stack);
 void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr,

@@ -52,7 +52,7 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
          file, line, cond, (uptr)v1, (uptr)v2);
   // FIXME: check for infinite recursion without a thread-local counter here.
   PRINT_CURRENT_STACK();
-  ShowStatsAndAbort();
+  Die();
 }

 // -------------------------- Flags ------------------------- {{{1
@@ -64,6 +64,10 @@ Flags *flags() {
   return &asan_flags;
 }

+static const char *MaybeCallAsanDefaultOptions() {
+  return (&__asan_default_options) ? __asan_default_options() : "";
+}
+
 static void ParseFlagsFromString(Flags *f, const char *str) {
   ParseFlag(str, &f->quarantine_size, "quarantine_size");
   ParseFlag(str, &f->symbolize, "symbolize");
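
MaybeCallAsanDefaultOptions above uses the weak-symbol idiom: __asan_default_options is declared with SANITIZER_WEAK_ATTRIBUTE, so its address is null unless the application defines it, and the runtime only calls it in that case. In user code the hook looks like this (the option string is just an example):

// In the instrumented application; this strong definition is picked up
// by InitializeFlags() below instead of the "" fallback.
extern "C" const char *__asan_default_options() {
  return "verbosity=1:malloc_context_size=20";
}
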
@@ -98,21 +102,20 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
   ParseFlag(str, &f->allow_reexec, "allow_reexec");
   ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
   ParseFlag(str, &f->log_path, "log_path");
+  ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
+  ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
+  ParseFlag(str, &f->poison_heap, "poison_heap");
+  ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
+  ParseFlag(str, &f->use_stack_depot, "use_stack_depot");
 }

-extern "C" {
-SANITIZER_WEAK_ATTRIBUTE
-SANITIZER_INTERFACE_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-}  // extern "C"
-
 void InitializeFlags(Flags *f, const char *env) {
   internal_memset(f, 0, sizeof(*f));

   f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
   f->symbolize = false;
   f->verbosity = 0;
-  f->redzone = (ASAN_LOW_MEMORY) ? 64 : 128;
+  f->redzone = ASAN_ALLOCATOR_VERSION == 2 ? 16 : (ASAN_LOW_MEMORY) ? 64 : 128;
   f->debug = false;
   f->report_globals = 1;
   f->check_initialization_order = true;
@@ -137,12 +140,17 @@ void InitializeFlags(Flags *f, const char *env) {
   f->allow_reexec = true;
   f->print_full_thread_history = true;
   f->log_path = 0;
+  f->fast_unwind_on_fatal = true;
+  f->fast_unwind_on_malloc = true;
+  f->poison_heap = true;
+  f->alloc_dealloc_mismatch = true;
+  f->use_stack_depot = true;  // Only affects allocator2.

   // Override from user-specified string.
-  ParseFlagsFromString(f, __asan_default_options());
+  ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
   if (flags()->verbosity) {
     Report("Using the defaults from __asan_default_options: %s\n",
-           __asan_default_options());
+           MaybeCallAsanDefaultOptions());
   }

   // Override from command line.
@@ -239,15 +247,12 @@ static NOINLINE void force_interface_symbols() {
     case 27: __asan_set_error_exit_code(0); break;
     case 28: __asan_stack_free(0, 0, 0); break;
     case 29: __asan_stack_malloc(0, 0); break;
-    case 30: __asan_on_error(); break;
-    case 31: __asan_default_options(); break;
-    case 32: __asan_before_dynamic_init(0, 0); break;
-    case 33: __asan_after_dynamic_init(); break;
-    case 34: __asan_malloc_hook(0, 0); break;
-    case 35: __asan_free_hook(0); break;
-    case 36: __asan_symbolize(0, 0, 0); break;
-    case 37: __asan_poison_stack_memory(0, 0); break;
-    case 38: __asan_unpoison_stack_memory(0, 0); break;
+    case 30: __asan_before_dynamic_init(0, 0); break;
+    case 31: __asan_after_dynamic_init(); break;
+    case 32: __asan_poison_stack_memory(0, 0); break;
+    case 33: __asan_unpoison_stack_memory(0, 0); break;
+    case 34: __asan_region_is_poisoned(0, 0); break;
+    case 35: __asan_describe_address(0); break;
   }
 }

@@ -261,6 +266,13 @@ static void asan_atexit() {
 // ---------------------- Interface ---------------- {{{1
 using namespace __asan;  // NOLINT

+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+const char* __asan_default_options() { return ""; }
+}  // extern "C"
+#endif
+
 int NOINLINE __asan_set_error_exit_code(int exit_code) {
   int old = flags()->exitcode;
   flags()->exitcode = exit_code;

@@ -15,9 +15,15 @@

 namespace __asan {

+static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer,
+                                   int out_size) {
+  return (&__asan_symbolize) ? __asan_symbolize(pc, out_buffer, out_size)
+                             : false;
+}
+
 void PrintStack(StackTrace *stack) {
   stack->PrintStack(stack->trace, stack->size, flags()->symbolize,
-                    flags()->strip_path_prefix, __asan_symbolize);
+                    flags()->strip_path_prefix, MaybeCallAsanSymbolize);
 }

 }  // namespace __asan

@@ -27,7 +33,7 @@ void PrintStack(StackTrace *stack) {
 // Provide default implementation of __asan_symbolize that does nothing
 // and may be overridden by user if he wants to use his own symbolization.
 // ASan on Windows has its own implementation of this.
-#ifndef _WIN32
+#if !defined(_WIN32) && !SANITIZER_SUPPORTS_WEAK_HOOKS
 SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE
 bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) {
   return false;

@@ -13,10 +13,11 @@
 #define ASAN_STACK_H

 #include "sanitizer_common/sanitizer_stacktrace.h"
+#include "asan_flags.h"

 namespace __asan {

-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp);
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast);
 void PrintStack(StackTrace *stack);

 }  // namespace __asan

@@ -25,27 +26,38 @@ void PrintStack(StackTrace *stack);
 // The pc will be in the position 0 of the resulting stack trace.
 // The bp may refer to the current frame or to the caller's frame.
 // fast_unwind is currently unused.
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp)           \
+#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast)     \
   StackTrace stack;                                             \
-  GetStackTrace(&stack, max_s, pc, bp)
+  GetStackTrace(&stack, max_s, pc, bp, fast)

 // NOTE: A Rule of thumb is to retrieve stack trace in the interceptors
 // as early as possible (in functions exposed to the user), as we generally
 // don't want stack trace to contain functions from ASan internals.

-#define GET_STACK_TRACE_HERE(max_size)                          \
+#define GET_STACK_TRACE(max_size, fast)                         \
   GET_STACK_TRACE_WITH_PC_AND_BP(max_size,                      \
-      StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
+      StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), fast)

-#define GET_STACK_TRACE_HERE_FOR_MALLOC                         \
-  GET_STACK_TRACE_HERE(flags()->malloc_context_size)
+#define GET_STACK_TRACE_FATAL(pc, bp)                           \
+  GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp,        \
+      flags()->fast_unwind_on_fatal)

-#define GET_STACK_TRACE_HERE_FOR_FREE(ptr)                      \
-  GET_STACK_TRACE_HERE(flags()->malloc_context_size)
+#define GET_STACK_TRACE_FATAL_HERE                              \
+  GET_STACK_TRACE(kStackTraceMax, flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_THREAD                                  \
+  GET_STACK_TRACE(kStackTraceMax, true)
+
+#define GET_STACK_TRACE_MALLOC                                  \
+  GET_STACK_TRACE(flags()->malloc_context_size,                 \
+      flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC

 #define PRINT_CURRENT_STACK()                                   \
   {                                                             \
-    GET_STACK_TRACE_HERE(kStackTraceMax);                       \
+    GET_STACK_TRACE(kStackTraceMax,                             \
+        flags()->fast_unwind_on_fatal);                         \
     PrintStack(&stack);                                         \
   }

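The new `fast` parameter selects between the frame-pointer walker and the slower unwinder: fast_unwind_on_malloc keeps per-allocation traces cheap, while fatal reports can afford either. A rough sketch of what a frame-pointer unwind does, assuming the conventional {saved-fp, return-pc} frame layout of -fno-omit-frame-pointer builds (illustrative, not the sanitizer_common implementation):

#include <stdint.h>

// Walk saved frame pointers; each frame stores {caller_fp, return_pc}.
int FastUnwindSketch(uintptr_t pc, uintptr_t bp,
                     uintptr_t stack_bottom, uintptr_t stack_top,
                     uintptr_t *trace, int max_depth) {
  int n = 0;
  trace[n++] = pc;
  uintptr_t *frame = (uintptr_t *)bp;
  while (n < max_depth &&
         (uintptr_t)frame > stack_bottom &&
         (uintptr_t)frame < stack_top - sizeof(uintptr_t)) {
    trace[n++] = frame[1];          // saved return address
    frame = (uintptr_t *)frame[0];  // follow the saved caller frame pointer
  }
  return n;
}
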
@@ -15,6 +15,7 @@
 #include "asan_stats.h"
 #include "asan_thread_registry.h"
 #include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"

 namespace __asan {

@@ -40,8 +41,9 @@ void AsanStats::Print() {
   Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees);
   Printf("Stats: %zuM really freed by %zu calls\n",
          really_freed>>20, real_frees);
-  Printf("Stats: %zuM (%zu full pages) mmaped in %zu calls\n",
-         mmaped>>20, mmaped / GetPageSizeCached(), mmaps);
+  Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
+         (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
+         mmaps, munmaps);

   PrintMallocStatsArray("  mmaps   by size class: ", mmaped_by_size);
   PrintMallocStatsArray("  mallocs by size class: ", malloced_by_size);

@@ -59,6 +61,10 @@ static void PrintAccumulatedStats() {
   // Use lock to keep reports from mixing up.
   ScopedLock lock(&print_lock);
   stats.Print();
+  StackDepotStats *stack_depot_stats = StackDepotGetStats();
+  Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
+         stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
+  PrintInternalAllocatorStats();
 }

 }  // namespace __asan

@@ -35,6 +35,8 @@ struct AsanStats {
   uptr realloced;
   uptr mmaps;
   uptr mmaped;
+  uptr munmaps;
+  uptr munmaped;
   uptr mmaped_by_size[kNumberOfSizeClasses];
   uptr malloced_by_size[kNumberOfSizeClasses];
   uptr freed_by_size[kNumberOfSizeClasses];

@@ -37,6 +37,7 @@ class AsanThreadSummary {
       internal_memcpy(&stack_, stack, sizeof(*stack));
     }
     thread_ = 0;
+    name_[0] = 0;
   }
   u32 tid() { return tid_; }
   void set_tid(u32 tid) { tid_ = tid; }
@@ -47,6 +48,10 @@ class AsanThreadSummary {
   AsanThread *thread() { return thread_; }
   void set_thread(AsanThread *thread) { thread_ = thread; }
   static void TSDDtor(void *tsd);
+  void set_name(const char *name) {
+    internal_strncpy(name_, name, sizeof(name_) - 1);
+  }
+  const char *name() { return name_; }

  private:
   u32 tid_;
@@ -54,8 +59,12 @@ class AsanThreadSummary {
   bool announced_;
   StackTrace stack_;
   AsanThread *thread_;
+  char name_[128];
 };

+// AsanThreadSummary objects are never freed, so we need many of them.
+COMPILER_CHECK(sizeof(AsanThreadSummary) <= 4094);
+
 // AsanThread are stored in TSD and destroyed when the thread dies.
 class AsanThread {
  public:

@@ -121,13 +121,14 @@ uptr AsanThreadRegistry::GetCurrentAllocatedBytes() {
 uptr AsanThreadRegistry::GetHeapSize() {
   ScopedLock lock(&mu_);
   UpdateAccumulatedStatsUnlocked();
-  return accumulated_stats_.mmaped;
+  return accumulated_stats_.mmaped - accumulated_stats_.munmaped;
 }

 uptr AsanThreadRegistry::GetFreeBytes() {
   ScopedLock lock(&mu_);
   UpdateAccumulatedStatsUnlocked();
   uptr total_free = accumulated_stats_.mmaped
+                  - accumulated_stats_.munmaped
                   + accumulated_stats_.really_freed
                   + accumulated_stats_.really_freed_redzones;
   uptr total_used = accumulated_stats_.malloced

@@ -30,7 +30,8 @@ static AsanLock dbghelp_lock(LINKER_INITIALIZED);
 static bool dbghelp_initialized = false;
 #pragma comment(lib, "dbghelp.lib")

-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
+  (void)fast;
   stack->max_size = max_s;
   void *tmp[kStackTraceMax];

@@ -115,6 +115,15 @@ extern "C" {
   bool __asan_address_is_poisoned(void const volatile *addr)
       SANITIZER_INTERFACE_ATTRIBUTE;

+  // If at least one byte in [beg, beg+size) is poisoned, return the address
+  // of the first such byte. Otherwise return 0.
+  uptr __asan_region_is_poisoned(uptr beg, uptr size)
+      SANITIZER_INTERFACE_ATTRIBUTE;
+
+  // Print the description of addr (useful when debugging in gdb).
+  void __asan_describe_address(uptr addr)
+      SANITIZER_INTERFACE_ATTRIBUTE;
+
   // This is an internal function that is called to report an error.
   // However it is still a part of the interface because users may want to
   // set a breakpoint on this function in a debugger.

@@ -138,7 +147,7 @@ extern "C" {
   // User may provide function that would be called right when ASan detects
   // an error. This can be used to notice cases when ASan detects an error, but
   // the program crashes before ASan report is printed.
-  void __asan_on_error()
+  /* OPTIONAL */ void __asan_on_error()
       SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;

   // User may provide its own implementation for symbolization function.

@@ -146,7 +155,8 @@ extern "C" {
   // "out_buffer". Description should be at most "out_size" bytes long.
   // User-specified function should return true if symbolization was
   // successful.
-  bool __asan_symbolize(const void *pc, char *out_buffer, int out_size)
+  /* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer,
+                                       int out_size)
       SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;

   // Returns the estimated number of bytes that will be reserved by allocator

@@ -186,20 +196,19 @@ extern "C" {
   void __asan_print_accumulated_stats()
       SANITIZER_INTERFACE_ATTRIBUTE;

-  // This function may be overridden by user to provide a string containing
-  // ASan runtime options. See asan_flags.h for details.
-  const char* __asan_default_options()
+  // This function may be optionally provided by user and should return
+  // a string containing ASan runtime options. See asan_flags.h for details.
+  /* OPTIONAL */ const char* __asan_default_options()
       SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;

-  // Malloc hooks that may be overridden by user.
+  // Malloc hooks that may be optionally provided by user.
   // __asan_malloc_hook(ptr, size) is called immediately after
   // allocation of "size" bytes, which returned "ptr".
   // __asan_free_hook(ptr) is called immediately before
   // deallocation of "ptr".
-  // If user doesn't provide implementations of these hooks, they are no-op.
-  void __asan_malloc_hook(void *ptr, uptr size)
+  /* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size)
       SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
-  void __asan_free_hook(void *ptr)
+  /* OPTIONAL */ void __asan_free_hook(void *ptr)
       SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
 }  // extern "C"

@@ -28,6 +28,12 @@
 #  define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
 #endif

+#ifdef __linux__
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
+#else
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
+#endif
+
 // __has_feature
 #if !defined(__has_feature)
 # define __has_feature(x) 0

@@ -73,6 +79,12 @@ extern "C" {
   // stderr.
   void __sanitizer_set_report_fd(int fd)
       SANITIZER_INTERFACE_ATTRIBUTE;
+
+  // Notify the tools that the sandbox is going to be turned on. The reserved
+  // parameter will be used in the future to hold a structure with functions
+  // that the tools may call to bypass the sandbox.
+  void __sanitizer_sandbox_on_notify(void *reserved)
+      SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE;
 }  // extern "C"

 #endif  // SANITIZER_COMMON_INTERFACE_DEFS_H

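__sanitizer_sandbox_on_notify lets a process warn the runtime before locking itself down (e.g. before entering seccomp), so the tool can acquire any resources it will still need. Since the symbol is weak, a caller should test its address first; a sketch of the call site (illustrative):

extern "C" __attribute__((weak))
void __sanitizer_sandbox_on_notify(void *reserved);

void EnterSandbox() {
  if (&__sanitizer_sandbox_on_notify)   // weak symbol: may be absent
    __sanitizer_sandbox_on_notify(0);   // reserved: unused today, pass 0
  // ... enable the sandbox / drop privileges here ...
}
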
@@ -17,6 +17,15 @@
 # error "Interception doesn't work on this operating system."
 #endif

+#include "sanitizer/common_interface_defs.h"
+
+// These typedefs should be used only in the interceptor definitions to replace
+// the standard system types (e.g. SSIZE_T instead of ssize_t)
+typedef __sanitizer::uptr SIZE_T;
+typedef __sanitizer::sptr SSIZE_T;
+typedef __sanitizer::u64  OFF_T;
+typedef __sanitizer::u64  OFF64_T;
+
 // How to use this library:
 //     1) Include this header to define your own interceptors
 //        (see details below).

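The portable typedefs let interceptors be declared without dragging in system headers. A sketch of how they appear in an interceptor definition (the INTERCEPTOR/REAL macros are the ones this header provides; the body is illustrative):

// Hypothetical read(2) interceptor using the new portable types,
// with SSIZE_T/SIZE_T standing in for ssize_t/size_t.
INTERCEPTOR(SSIZE_T, read, int fd, void *buf, SIZE_T count) {
  SSIZE_T res = REAL(read)(fd, buf, count);
  // ... a tool would e.g. mark [buf, buf+res) as initialized here ...
  return res;
}
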
@@ -22,6 +22,7 @@ sanitizer_common_files = \
 	sanitizer_stackdepot.cc \
 	sanitizer_stacktrace.cc \
 	sanitizer_symbolizer.cc \
+	sanitizer_symbolizer_itanium.cc \
 	sanitizer_symbolizer_linux.cc \
 	sanitizer_symbolizer_mac.cc \
 	sanitizer_symbolizer_win.cc \

@@ -59,9 +59,9 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
 	sanitizer_flags.lo sanitizer_libc.lo sanitizer_linux.lo \
 	sanitizer_mac.lo sanitizer_posix.lo sanitizer_printf.lo \
 	sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
-	sanitizer_symbolizer.lo sanitizer_symbolizer_linux.lo \
-	sanitizer_symbolizer_mac.lo sanitizer_symbolizer_win.lo \
-	sanitizer_win.lo
+	sanitizer_symbolizer.lo sanitizer_symbolizer_itanium.lo \
+	sanitizer_symbolizer_linux.lo sanitizer_symbolizer_mac.lo \
+	sanitizer_symbolizer_win.lo sanitizer_win.lo
 am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
 libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
 DEFAULT_INCLUDES = -I.@am__isrc@

@@ -236,6 +236,7 @@ sanitizer_common_files = \
 	sanitizer_stackdepot.cc \
 	sanitizer_stacktrace.cc \
 	sanitizer_symbolizer.cc \
+	sanitizer_symbolizer_itanium.cc \
 	sanitizer_symbolizer_linux.cc \
 	sanitizer_symbolizer_mac.cc \
 	sanitizer_symbolizer_win.cc \

@@ -345,6 +346,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_itanium.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_linux.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@

@@ -20,76 +20,140 @@

 namespace __sanitizer {

-// Maps size class id to size and back.
-template <uptr l0, uptr l1, uptr l2, uptr l3, uptr l4, uptr l5,
-          uptr s0, uptr s1, uptr s2, uptr s3, uptr s4,
-          uptr c0, uptr c1, uptr c2, uptr c3, uptr c4>
-class SplineSizeClassMap {
- private:
-  // Here we use a spline composed of 5 polynomials of oder 1.
-  // The first size class is l0, then the classes go with step s0
-  // untill they reach l1, after which they go with step s1 and so on.
-  // Steps should be powers of two for cheap division.
-  // The size of the last size class should be a power of two.
-  // There should be at most 256 size classes.
-  static const uptr u0 = 0  + (l1 - l0) / s0;
-  static const uptr u1 = u0 + (l2 - l1) / s1;
-  static const uptr u2 = u1 + (l3 - l2) / s2;
-  static const uptr u3 = u2 + (l4 - l3) / s3;
-  static const uptr u4 = u3 + (l5 - l4) / s4;
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 corresponds to size 0.
+// Classes 1 - 16 correspond to sizes 8 - 128 (size = class_id * 8).
+// Next 8 classes: 128 + i * 16 (i = 1 to 8).
+// Next 8 classes: 256 + i * 32 (i = 1 to 8).
+// ...
+// Next 8 classes: 2^k + i * 2^(k-3) (i = 1 to 8).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+// - Difference between two consecutive size classes is between 12% and 6%
+//
+// This class also gives a hint to a thread-caching allocator about the amount
+// of chunks that need to be cached per-thread:
+// - kMaxNumCached is the maximal number of chunks per size class.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 8 diff: +8 00% l 3 cached: 256 2048; id 1
+// c02 => s: 16 diff: +8 100% l 4 cached: 256 4096; id 2
+// ...
+// c07 => s: 56 diff: +8 16% l 5 cached: 256 14336; id 7
+//
+// c08 => s: 64 diff: +8 14% l 6 cached: 256 16384; id 8
+// ...
+// c15 => s: 120 diff: +8 07% l 6 cached: 256 30720; id 15
+//
+// c16 => s: 128 diff: +8 06% l 7 cached: 256 32768; id 16
+// c17 => s: 144 diff: +16 12% l 7 cached: 227 32688; id 17
+// ...
+// c23 => s: 240 diff: +16 07% l 7 cached: 136 32640; id 23
+//
+// c24 => s: 256 diff: +16 06% l 8 cached: 128 32768; id 24
+// c25 => s: 288 diff: +32 12% l 8 cached: 113 32544; id 25
+// ...
+// c31 => s: 480 diff: +32 07% l 8 cached: 68 32640; id 31
+//
+// c32 => s: 512 diff: +32 06% l 9 cached: 64 32768; id 32

+template <uptr kMaxSizeLog, uptr kMaxNumCached, uptr kMaxBytesCachedLog>
+class SizeClassMap {
+  static const uptr kMinSizeLog = 3;
+  static const uptr kMidSizeLog = kMinSizeLog + 4;
+  static const uptr kMinSize = 1 << kMinSizeLog;
+  static const uptr kMidSize = 1 << kMidSizeLog;
+  static const uptr kMidClass = kMidSize / kMinSize;
+  static const uptr S = 3;
+  static const uptr M = (1 << S) - 1;

  public:
-  // The number of size classes should be a power of two for fast division.
-  static const uptr kNumClasses = u4 + 1;
-  static const uptr kMaxSize = l5;
-  static const uptr kMinSize = l0;
-
-  COMPILER_CHECK(kNumClasses <= 256);
-  COMPILER_CHECK((kNumClasses & (kNumClasses - 1)) == 0);
-  COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);
+  static const uptr kMaxSize = 1 << kMaxSizeLog;
+  static const uptr kNumClasses =
+      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+  COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
+  static const uptr kNumClassesRounded =
+      kNumClasses == 32  ? 32 :
+      kNumClasses <= 64  ? 64 :
+      kNumClasses <= 128 ? 128 : 256;

   static uptr Size(uptr class_id) {
-    if (class_id <= u0) return l0 + s0 * (class_id - 0);
-    if (class_id <= u1) return l1 + s1 * (class_id - u0);
-    if (class_id <= u2) return l2 + s2 * (class_id - u1);
-    if (class_id <= u3) return l3 + s3 * (class_id - u2);
-    if (class_id <= u4) return l4 + s4 * (class_id - u3);
-    return 0;
+    if (class_id <= kMidClass)
+      return kMinSize * class_id;
+    class_id -= kMidClass;
+    uptr t = kMidSize << (class_id >> S);
+    return t + (t >> S) * (class_id & M);
   }

   static uptr ClassID(uptr size) {
-    if (size <= l1) return 0  + (size - l0 + s0 - 1) / s0;
-    if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
-    if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
-    if (size <= l4) return u2 + (size - l3 + s3 - 1) / s3;
-    if (size <= l5) return u3 + (size - l4 + s4 - 1) / s4;
-    return 0;
+    if (size <= kMidSize)
+      return (size + kMinSize - 1) >> kMinSizeLog;
+    if (size > kMaxSize) return 0;
+    uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
+    uptr hbits = (size >> (l - S)) & M;
+    uptr lbits = size & ((1 << (l - S)) - 1);
+    uptr l1 = l - kMidSizeLog;
+    return kMidClass + (l1 << S) + hbits + (lbits > 0);
   }

   static uptr MaxCached(uptr class_id) {
-    if (class_id <= u0) return c0;
-    if (class_id <= u1) return c1;
-    if (class_id <= u2) return c2;
-    if (class_id <= u3) return c3;
-    if (class_id <= u4) return c4;
-    return 0;
+    if (class_id == 0) return 0;
+    uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
+    return Max(1UL, Min(kMaxNumCached, n));
+  }
+
+  static void Print() {
+    uptr prev_s = 0;
+    uptr total_cached = 0;
+    for (uptr i = 0; i < kNumClasses; i++) {
+      uptr s = Size(i);
+      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+        Printf("\n");
+      uptr d = s - prev_s;
+      uptr p = prev_s ? (d * 100 / prev_s) : 0;
+      uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(s);
+      uptr cached = MaxCached(i) * s;
+      Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+             "cached: %zd %zd; id %zd\n",
+             i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
+      total_cached += cached;
+      prev_s = s;
+    }
+    Printf("Total cached: %zd\n", total_cached);
+  }
+
+  static void Validate() {
+    for (uptr c = 1; c < kNumClasses; c++) {
+      // Printf("Validate: c%zd\n", c);
+      uptr s = Size(c);
+      CHECK_EQ(ClassID(s), c);
+      if (c != kNumClasses - 1)
+        CHECK_EQ(ClassID(s + 1), c + 1);
+      CHECK_EQ(ClassID(s - 1), c);
+      if (c)
+        CHECK_GT(Size(c), Size(c-1));
+    }
+    CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+    for (uptr s = 1; s <= kMaxSize; s++) {
+      uptr c = ClassID(s);
+      // Printf("s%zd => c%zd\n", s, c);
+      CHECK_LT(c, kNumClasses);
+      CHECK_GE(Size(c), s);
+      if (c > 0)
+        CHECK_LT(Size(c-1), s);
+    }
   }
 };

class DefaultSizeClassMap: public SplineSizeClassMap<
|
typedef SizeClassMap<15, 256, 16> DefaultSizeClassMap;
|
||||||
/* l: */1 << 4, 1 << 9, 1 << 12, 1 << 15, 1 << 18, 1 << 21,
|
typedef SizeClassMap<15, 64, 14> CompactSizeClassMap;
|
||||||
/* s: */1 << 4, 1 << 6, 1 << 9, 1 << 12, 1 << 15,
|
|
||||||
/* c: */256, 64, 16, 4, 1> {
|
|
||||||
private:
|
|
||||||
COMPILER_CHECK(kNumClasses == 256);
|
|
||||||
};
|
|
||||||
|
|
||||||
class CompactSizeClassMap: public SplineSizeClassMap<
|
|
||||||
/* l: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12, 1 << 15,
|
|
||||||
/* s: */1 << 3, 1 << 4, 1 << 7, 1 << 8, 1 << 12,
|
|
||||||
/* c: */256, 64, 16, 4, 1> {
|
|
||||||
private:
|
|
||||||
COMPILER_CHECK(kNumClasses <= 32);
|
|
||||||
};
|
|
||||||
|
|
||||||
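
To make the new two-level mapping concrete, here is a minimal standalone
sketch that mirrors it (illustrative only, not the library code; the constants
match the defaults used by DefaultSizeClassMap above):

    #include <cassert>
    typedef unsigned long long uptr;
    static const uptr kMinSizeLog = 3, kMidSizeLog = 7, S = 3;
    static const uptr kMinSize = 1ULL << kMinSizeLog;   // 8
    static const uptr kMidSize = 1ULL << kMidSizeLog;   // 128
    static const uptr kMidClass = kMidSize / kMinSize;  // 16
    static const uptr M = (1 << S) - 1;
    static const uptr kMaxSize = 1ULL << 15;            // kMaxSizeLog = 15

    uptr Size(uptr class_id) {
      if (class_id <= kMidClass) return kMinSize * class_id;
      class_id -= kMidClass;
      uptr t = kMidSize << (class_id >> S);
      return t + (t >> S) * (class_id & M);
    }

    uptr ClassID(uptr size) {
      if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog;
      if (size > kMaxSize) return 0;
      uptr l = 63 - __builtin_clzll(size);          // log2 of the size
      uptr hbits = (size >> (l - S)) & M;           // next S bits pick the step
      uptr lbits = size & ((1ULL << (l - S)) - 1);  // any remainder rounds up
      return kMidClass + ((l - kMidSizeLog) << S) + hbits + (lbits > 0);
    }

    int main() {
      assert(ClassID(144) == 17 && Size(17) == 144);  // the c17 line above
      for (uptr s = 1; s <= kMaxSize; s++)            // same as Validate()
        assert(Size(ClassID(s)) >= s);
      return 0;
    }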
 struct AllocatorListNode {
   AllocatorListNode *next;

@@ -97,11 +161,45 @@ struct AllocatorListNode {

 typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;

+// Move at most max_count chunks from allocate_from to allocate_to.
+// This function would be better as a method of AllocatorFreeList, but we can't
+// inherit it from IntrusiveList as the ancient gcc complains about non-PODness.
+static inline uptr BulkMove(uptr max_count,
+                            AllocatorFreeList *allocate_from,
+                            AllocatorFreeList *allocate_to) {
+  CHECK(!allocate_from->empty());
+  CHECK(allocate_to->empty());
+  uptr res = 0;
+  if (allocate_from->size() <= max_count) {
+    res = allocate_from->size();
+    allocate_to->append_front(allocate_from);
+    CHECK(allocate_from->empty());
+  } else {
+    for (uptr i = 0; i < max_count; i++) {
+      AllocatorListNode *node = allocate_from->front();
+      allocate_from->pop_front();
+      allocate_to->push_front(node);
+    }
+    res = max_count;
+    CHECK(!allocate_from->empty());
+  }
+  CHECK(!allocate_to->empty());
+  return res;
+}
+
+// Allocators call these callbacks on mmap/munmap.
+struct NoOpMapUnmapCallback {
+  void OnMap(uptr p, uptr size) const { }
+  void OnUnmap(uptr p, uptr size) const { }
+};
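
As a usage sketch of the new MapUnmapCallback hook (the counting callback
below is hypothetical, not part of the library):

    // Counts bytes currently mmap-ed by the allocator.
    // (A real tool would make the counter atomic.)
    struct CountingMapUnmapCallback {
      static uptr mapped;
      void OnMap(uptr p, uptr size) const { mapped += size; }
      void OnUnmap(uptr p, uptr size) const { mapped -= size; }
    };
    uptr CountingMapUnmapCallback::mapped = 0;
    // Plugged in via the new template parameter, e.g.:
    //   SizeClassAllocator64<kSpaceBeg, kSpaceSize, 16, DefaultSizeClassMap,
    //                        CountingMapUnmapCallback> a64;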
 // SizeClassAllocator64 -- allocator for 64-bit address space.
 //
 // Space: a portion of address space of kSpaceSize bytes starting at
 // a fixed address (kSpaceBeg). Both constants are powers of two and
 // kSpaceBeg is kSpaceSize-aligned.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
 //
 // Region: a part of Space dedicated to a single size class.
 // There are kNumClasses Regions of equal size.

@@ -112,22 +210,35 @@ typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
 // A Region looks like this:
 // UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
 template <const uptr kSpaceBeg, const uptr kSpaceSize,
-          const uptr kMetadataSize, class SizeClassMap>
+          const uptr kMetadataSize, class SizeClassMap,
+          class MapUnmapCallback = NoOpMapUnmapCallback>
 class SizeClassAllocator64 {
  public:
   void Init() {
-    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
-             AllocBeg(), AllocSize())));
+    CHECK_EQ(kSpaceBeg,
+             reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize)));
+    MapWithCallback(kSpaceEnd, AdditionalSize());
   }

-  bool CanAllocate(uptr size, uptr alignment) {
+  void MapWithCallback(uptr beg, uptr size) {
+    CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+    MapUnmapCallback().OnMap(beg, size);
+  }
+
+  void UnmapWithCallback(uptr beg, uptr size) {
+    MapUnmapCallback().OnUnmap(beg, size);
+    UnmapOrDie(reinterpret_cast<void *>(beg), size);
+  }
+
+  static bool CanAllocate(uptr size, uptr alignment) {
     return size <= SizeClassMap::kMaxSize &&
       alignment <= SizeClassMap::kMaxSize;
   }

   void *Allocate(uptr size, uptr alignment) {
+    if (size < alignment) size = alignment;
     CHECK(CanAllocate(size, alignment));
-    return AllocateBySizeClass(SizeClassMap::ClassID(size));
+    return AllocateBySizeClass(ClassID(size));
   }

   void Deallocate(void *p) {

@@ -143,18 +254,8 @@ class SizeClassAllocator64 {
     if (region->free_list.empty()) {
       PopulateFreeList(class_id, region);
     }
-    CHECK(!region->free_list.empty());
-    uptr count = SizeClassMap::MaxCached(class_id);
-    if (region->free_list.size() <= count) {
-      free_list->append_front(&region->free_list);
-    } else {
-      for (uptr i = 0; i < count; i++) {
-        AllocatorListNode *node = region->free_list.front();
-        region->free_list.pop_front();
-        free_list->push_front(node);
-      }
-    }
-    CHECK(!free_list->empty());
+    region->n_allocated += BulkMove(SizeClassMap::MaxCached(class_id),
+                                    &region->free_list, free_list);
   }
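
The Region layout described above makes all address computations pure
arithmetic. A sketch with illustrative constants (the real values are
template parameters of the class):

    typedef unsigned long long uptr;
    const uptr kSpaceBeg = 0x700000000000ULL;   // kSpaceSize-aligned
    const uptr kSpaceSize = 1ULL << 40;
    const uptr kRegionSize = kSpaceSize / 256;  // kNumClassesRounded = 256
    const uptr kMetadataSize = 16;
    // Region base for a size class:
    uptr RegionBeg(uptr class_id) { return kSpaceBeg + kRegionSize * class_id; }
    // User chunk i of a class with chunk size `size`:
    uptr UserChunk(uptr class_id, uptr i, uptr size) {
      return RegionBeg(class_id) + i * size;
    }
    // Its metadata, growing down from the region's end:
    uptr MetaChunk(uptr class_id, uptr i) {
      return RegionBeg(class_id) + kRegionSize - (i + 1) * kMetadataSize;
    }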
   // Swallow the entire free_list for the given class_id.

@@ -162,6 +263,7 @@ class SizeClassAllocator64 {
     CHECK_LT(class_id, kNumClasses);
     RegionInfo *region = GetRegionInfo(class_id);
     SpinMutexLock l(&region->mutex);
+    region->n_freed += free_list->size();
     region->free_list.append_front(free_list);
   }

@@ -170,16 +272,20 @@ class SizeClassAllocator64 {
   }

   static uptr GetSizeClass(void *p) {
-    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
+    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
   }

-  static void *GetBlockBegin(void *p) {
+  void *GetBlockBegin(void *p) {
     uptr class_id = GetSizeClass(p);
     uptr size = SizeClassMap::Size(class_id);
     uptr chunk_idx = GetChunkIdx((uptr)p, size);
     uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
-    uptr begin = reg_beg + chunk_idx * size;
-    return (void*)begin;
+    uptr beg = chunk_idx * size;
+    uptr next_beg = beg + size;
+    RegionInfo *region = GetRegionInfo(class_id);
+    if (region->mapped_user >= next_beg)
+      return reinterpret_cast<void*>(reg_beg + beg);
+    return 0;
   }
   static uptr GetActuallyAllocatedSize(void *p) {

@@ -206,39 +312,66 @@ class SizeClassAllocator64 {

   // Test-only.
   void TestOnlyUnmap() {
-    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
+    UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
   }

-  static uptr AllocBeg()  { return kSpaceBeg; }
-  static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
+  void PrintStats() {
+    uptr total_mapped = 0;
+    uptr n_allocated = 0;
+    uptr n_freed = 0;
+    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+      RegionInfo *region = GetRegionInfo(class_id);
+      total_mapped += region->mapped_user;
+      n_allocated += region->n_allocated;
+      n_freed += region->n_freed;
+    }
+    Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
+           "remains %zd\n",
+           total_mapped >> 20, n_allocated, n_allocated - n_freed);
+    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+      RegionInfo *region = GetRegionInfo(class_id);
+      if (region->mapped_user == 0) continue;
+      Printf("  %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
+             class_id,
+             SizeClassMap::Size(class_id),
+             region->mapped_user >> 10,
+             region->n_allocated,
+             region->n_allocated - region->n_freed);
+    }
+  }

   typedef SizeClassMap SizeClassMapT;
-  static const uptr kNumClasses = SizeClassMap::kNumClasses;  // 2^k <= 256
+  static const uptr kNumClasses = SizeClassMap::kNumClasses;
+  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
  private:
-  static const uptr kRegionSize = kSpaceSize / kNumClasses;
+  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+  static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
   COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
-  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
   // kRegionSize must be >= 2^32.
   COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
   // Populate the free list with at most this number of bytes at once
   // or with one element if its size is greater.
-  static const uptr kPopulateSize = 1 << 18;
+  static const uptr kPopulateSize = 1 << 15;
+  // Call mmap for user memory with at least this size.
+  static const uptr kUserMapSize = 1 << 15;
+  // Call mmap for metadata memory with at least this size.
+  static const uptr kMetaMapSize = 1 << 16;

   struct RegionInfo {
     SpinMutex mutex;
     AllocatorFreeList free_list;
     uptr allocated_user;  // Bytes allocated for user memory.
     uptr allocated_meta;  // Bytes allocated for metadata.
-    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
+    uptr mapped_user;  // Bytes mapped for user memory.
+    uptr mapped_meta;  // Bytes mapped for metadata.
+    uptr n_allocated, n_freed;  // Just stats.
   };
-  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
+  COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);

   static uptr AdditionalSize() {
-    uptr PageSize = GetPageSizeCached();
-    uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
-    CHECK_EQ(res % PageSize, 0);
-    return res;
+    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+                     GetPageSizeCached());
   }

   RegionInfo *GetRegionInfo(uptr class_id) {

@@ -256,11 +389,20 @@ class SizeClassAllocator64 {
   }

   void PopulateFreeList(uptr class_id, RegionInfo *region) {
+    CHECK(region->free_list.empty());
     uptr size = SizeClassMap::Size(class_id);
     uptr beg_idx = region->allocated_user;
     uptr end_idx = beg_idx + kPopulateSize;
-    region->free_list.clear();
     uptr region_beg = kSpaceBeg + kRegionSize * class_id;
+    if (end_idx + size > region->mapped_user) {
+      // Do the mmap for the user memory.
+      uptr map_size = kUserMapSize;
+      while (end_idx + size > region->mapped_user + map_size)
+        map_size += kUserMapSize;
+      CHECK_GE(region->mapped_user + map_size, end_idx);
+      MapWithCallback(region_beg + region->mapped_user, map_size);
+      region->mapped_user += map_size;
+    }
     uptr idx = beg_idx;
     uptr i = 0;
     do {  // do-while loop because we need to put at least one item.

@@ -270,7 +412,19 @@ class SizeClassAllocator64 {
       i++;
     } while (idx < end_idx);
     region->allocated_user += idx - beg_idx;
+    CHECK_LE(region->allocated_user, region->mapped_user);
     region->allocated_meta += i * kMetadataSize;
+    if (region->allocated_meta > region->mapped_meta) {
+      uptr map_size = kMetaMapSize;
+      while (region->allocated_meta > region->mapped_meta + map_size)
+        map_size += kMetaMapSize;
+      // Do the mmap for the metadata.
+      CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
+      MapWithCallback(region_beg + kRegionSize -
+                      region->mapped_meta - map_size, map_size);
+      region->mapped_meta += map_size;
+    }
+    CHECK_LE(region->allocated_meta, region->mapped_meta);
     if (region->allocated_user + region->allocated_meta > kRegionSize) {
       Printf("Out of memory. Dying.\n");
       Printf("The process has exhausted %zuMB for size class %zu.\n",

@@ -289,6 +443,7 @@ class SizeClassAllocator64 {
     CHECK(!region->free_list.empty());
     AllocatorListNode *node = region->free_list.front();
     region->free_list.pop_front();
+    region->n_allocated++;
     return reinterpret_cast<void*>(node);
   }

@@ -296,9 +451,213 @@ class SizeClassAllocator64 {
     RegionInfo *region = GetRegionInfo(class_id);
     SpinMutexLock l(&region->mutex);
     region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+    region->n_freed++;
   }
 };
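
The on-demand mapping in PopulateFreeList shown above always grows the mapped
range in fixed-size steps. The step arithmetic is equivalent to a round-up, as
this re-statement (not the library code) shows:

    // Smallest multiple of kUserMapSize that covers the new end of the region.
    uptr NeededMapSize(uptr mapped_user, uptr end_idx, uptr chunk_size,
                       uptr kUserMapSize) {
      uptr map_size = kUserMapSize;
      while (end_idx + chunk_size > mapped_user + map_size)
        map_size += kUserMapSize;
      return map_size;  // == RoundUpTo(end_idx + chunk_size - mapped_user,
                        //              kUserMapSize)
    }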
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// a u8 array possible_regions[kNumPossibleRegions] to store the size classes.
+// A size class of 0 means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing the objects of this class should be
+// cache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+          const uptr kMetadataSize, class SizeClassMap,
+          class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator32 {
+ public:
+  void Init() {
+    state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State)));
+  }
+
+  void *MapWithCallback(uptr size) {
+    size = RoundUpTo(size, GetPageSizeCached());
+    void *res = MmapOrDie(size, "SizeClassAllocator32");
+    MapUnmapCallback().OnMap((uptr)res, size);
+    return res;
+  }
+  void UnmapWithCallback(uptr beg, uptr size) {
+    MapUnmapCallback().OnUnmap(beg, size);
+    UnmapOrDie(reinterpret_cast<void *>(beg), size);
+  }
+
+  static bool CanAllocate(uptr size, uptr alignment) {
+    return size <= SizeClassMap::kMaxSize &&
+      alignment <= SizeClassMap::kMaxSize;
+  }
+
+  void *Allocate(uptr size, uptr alignment) {
+    if (size < alignment) size = alignment;
+    CHECK(CanAllocate(size, alignment));
+    return AllocateBySizeClass(ClassID(size));
+  }
+
+  void Deallocate(void *p) {
+    CHECK(PointerIsMine(p));
+    DeallocateBySizeClass(p, GetSizeClass(p));
+  }
+
+  void *GetMetaData(void *p) {
+    CHECK(PointerIsMine(p));
+    uptr mem = reinterpret_cast<uptr>(p);
+    uptr beg = ComputeRegionBeg(mem);
+    uptr size = SizeClassMap::Size(GetSizeClass(p));
+    u32 offset = mem - beg;
+    uptr n = offset / (u32)size;  // 32-bit division
+    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+    return reinterpret_cast<void*>(meta);
+  }
+
+  // Allocate several chunks of the given class_id.
+  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
+    SizeClassInfo *sci = GetSizeClassInfo(class_id);
+    SpinMutexLock l(&sci->mutex);
+    EnsureSizeClassHasAvailableChunks(sci, class_id);
+    CHECK(!sci->free_list.empty());
+    BulkMove(SizeClassMap::MaxCached(class_id), &sci->free_list, free_list);
+  }
+
+  // Swallow the entire free_list for the given class_id.
+  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
+    SizeClassInfo *sci = GetSizeClassInfo(class_id);
+    SpinMutexLock l(&sci->mutex);
+    sci->free_list.append_front(free_list);
+  }
+
+  bool PointerIsMine(void *p) {
+    return GetSizeClass(p) != 0;
+  }
+
+  uptr GetSizeClass(void *p) {
+    return state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+  }
+
+  void *GetBlockBegin(void *p) {
+    CHECK(PointerIsMine(p));
+    uptr mem = reinterpret_cast<uptr>(p);
+    uptr beg = ComputeRegionBeg(mem);
+    uptr size = SizeClassMap::Size(GetSizeClass(p));
+    u32 offset = mem - beg;
+    u32 n = offset / (u32)size;  // 32-bit division
+    uptr res = beg + (n * (u32)size);
+    return reinterpret_cast<void*>(res);
+  }
+
+  uptr GetActuallyAllocatedSize(void *p) {
+    CHECK(PointerIsMine(p));
+    return SizeClassMap::Size(GetSizeClass(p));
+  }
+
+  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+  uptr TotalMemoryUsed() {
+    // No need to lock here.
+    uptr res = 0;
+    for (uptr i = 0; i < kNumPossibleRegions; i++)
+      if (state_->possible_regions[i])
+        res += kRegionSize;
+    return res;
+  }
+
+  void TestOnlyUnmap() {
+    for (uptr i = 0; i < kNumPossibleRegions; i++)
+      if (state_->possible_regions[i])
+        UnmapWithCallback((i * kRegionSize), kRegionSize);
+    UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
+  }
+
+  void PrintStats() {
+  }
+
+  typedef SizeClassMap SizeClassMapT;
+  static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+  static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
+  static const uptr kRegionSize = 1 << kRegionSizeLog;
+  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+  struct SizeClassInfo {
+    SpinMutex mutex;
+    AllocatorFreeList free_list;
+    char padding[kCacheLineSize - sizeof(uptr) - sizeof(AllocatorFreeList)];
+  };
+  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+  uptr ComputeRegionId(uptr mem) {
+    uptr res = mem >> kRegionSizeLog;
+    CHECK_LT(res, kNumPossibleRegions);
+    return res;
+  }
+
+  uptr ComputeRegionBeg(uptr mem) {
+    return mem & ~(kRegionSize - 1);
+  }
+
+  uptr AllocateRegion(uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+                                      "SizeClassAllocator32"));
+    MapUnmapCallback().OnMap(res, kRegionSize);
+    CHECK_EQ(0U, (res & (kRegionSize - 1)));
+    CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
+    state_->possible_regions[ComputeRegionId(res)] = class_id;
+    return res;
+  }
+
+  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    return &state_->size_class_info_array[class_id];
+  }
+
+  void EnsureSizeClassHasAvailableChunks(SizeClassInfo *sci, uptr class_id) {
+    if (!sci->free_list.empty()) return;
+    uptr size = SizeClassMap::Size(class_id);
+    uptr reg = AllocateRegion(class_id);
+    uptr n_chunks = kRegionSize / (size + kMetadataSize);
+    for (uptr i = reg; i < reg + n_chunks * size; i += size)
+      sci->free_list.push_back(reinterpret_cast<AllocatorListNode*>(i));
+  }
+
+  void *AllocateBySizeClass(uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    SizeClassInfo *sci = GetSizeClassInfo(class_id);
+    SpinMutexLock l(&sci->mutex);
+    EnsureSizeClassHasAvailableChunks(sci, class_id);
+    CHECK(!sci->free_list.empty());
+    AllocatorListNode *node = sci->free_list.front();
+    sci->free_list.pop_front();
+    return reinterpret_cast<void*>(node);
+  }
+
+  void DeallocateBySizeClass(void *p, uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    SizeClassInfo *sci = GetSizeClassInfo(class_id);
+    SpinMutexLock l(&sci->mutex);
+    sci->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+  }
+
+  struct State {
+    u8 possible_regions[kNumPossibleRegions];
+    SizeClassInfo size_class_info_array[kNumClasses];
+  };
+  State *state_;
+};
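
The possible_regions array turns the 32-bit allocator's pointer queries into
a single table lookup; a worked illustration of the arithmetic (kRegionSizeLog
is 20 on 32-bit hosts, so regions are 1 MB):

    // For p = 0x12345678:
    //   region id  = 0x12345678 >> 20      = 0x123  (index into possible_regions)
    //   region beg = 0x12345678 & ~0xFFFFF = 0x12300000
    // possible_regions[0x123] is 0 if the region is foreign, or the size class
    // that owns it; chunk index and metadata follow as in GetMetaData above.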
 // Objects of this type should be used as local caches for SizeClassAllocator64.
 // Since the typical use of this class is to have one object per thread in TLS,
 // it has to be POD.

@@ -312,6 +671,7 @@ struct SizeClassAllocatorLocalCache {
   }

   void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+    CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
     AllocatorFreeList *free_list = &free_lists_[class_id];
     if (free_list->empty())

@@ -323,6 +683,7 @@ struct SizeClassAllocatorLocalCache {
   }

   void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+    CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
     AllocatorFreeList *free_list = &free_lists_[class_id];
     free_list->push_front(reinterpret_cast<AllocatorListNode*>(p));

@@ -358,6 +719,7 @@ struct SizeClassAllocatorLocalCache {
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
 class LargeMmapAllocator {
  public:
   void Init() {

@@ -372,6 +734,7 @@ class LargeMmapAllocator {
     if (map_size < size) return 0;  // Overflow.
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDie(map_size, "LargeMmapAllocator"));
+    MapUnmapCallback().OnMap(map_beg, map_size);
     uptr map_end = map_beg + map_size;
     uptr res = map_beg + page_size_;
     if (res & (alignment - 1))  // Align.

@@ -384,11 +747,13 @@ class LargeMmapAllocator {
     h->map_size = map_size;
     {
       SpinMutexLock l(&mutex_);
-      h->next = list_;
-      h->prev = 0;
-      if (list_)
-        list_->prev = h;
-      list_ = h;
+      uptr idx = n_chunks_++;
+      CHECK_LT(idx, kMaxNumChunks);
+      h->chunk_idx = idx;
+      chunks_[idx] = h;
+      stats.n_allocs++;
+      stats.currently_allocated += map_size;
+      stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
     }
     return reinterpret_cast<void*>(res);
   }

@@ -397,63 +762,81 @@ class LargeMmapAllocator {
     Header *h = GetHeader(p);
     {
       SpinMutexLock l(&mutex_);
-      Header *prev = h->prev;
-      Header *next = h->next;
-      if (prev)
-        prev->next = next;
-      if (next)
-        next->prev = prev;
-      if (h == list_)
-        list_ = next;
+      uptr idx = h->chunk_idx;
+      CHECK_EQ(chunks_[idx], h);
+      CHECK_LT(idx, n_chunks_);
+      chunks_[idx] = chunks_[n_chunks_ - 1];
+      chunks_[idx]->chunk_idx = idx;
+      n_chunks_--;
+      stats.n_frees++;
+      stats.currently_allocated -= h->map_size;
     }
+    MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
     UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
   }

   uptr TotalMemoryUsed() {
     SpinMutexLock l(&mutex_);
     uptr res = 0;
-    for (Header *l = list_; l; l = l->next) {
-      res += RoundUpMapSize(l->size);
+    for (uptr i = 0; i < n_chunks_; i++) {
+      Header *h = chunks_[i];
+      CHECK_EQ(h->chunk_idx, i);
+      res += RoundUpMapSize(h->size);
     }
     return res;
   }

   bool PointerIsMine(void *p) {
-    // Fast check.
-    if ((reinterpret_cast<uptr>(p) & (page_size_ - 1))) return false;
-    SpinMutexLock l(&mutex_);
-    for (Header *l = list_; l; l = l->next) {
-      if (GetUser(l) == p) return true;
-    }
-    return false;
+    return GetBlockBegin(p) != 0;
   }

   uptr GetActuallyAllocatedSize(void *p) {
-    return RoundUpMapSize(GetHeader(p)->size) - page_size_;
+    return RoundUpTo(GetHeader(p)->size, page_size_);
   }

   // At least page_size_/2 metadata bytes are available.
   void *GetMetaData(void *p) {
+    // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+    CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
     return GetHeader(p) + 1;
   }

-  void *GetBlockBegin(void *p) {
+  void *GetBlockBegin(void *ptr) {
+    uptr p = reinterpret_cast<uptr>(ptr);
     SpinMutexLock l(&mutex_);
-    for (Header *l = list_; l; l = l->next) {
-      void *b = GetUser(l);
-      if (p >= b && p < (u8*)b + l->size)
-        return b;
+    uptr nearest_chunk = 0;
+    // Cache-friendly linear search.
+    for (uptr i = 0; i < n_chunks_; i++) {
+      uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+      if (p < ch) continue;  // p is to the left of this chunk, skip it.
+      if (p - ch < p - nearest_chunk)
+        nearest_chunk = ch;
     }
-    return 0;
+    if (!nearest_chunk)
+      return 0;
+    Header *h = reinterpret_cast<Header *>(nearest_chunk);
+    CHECK_GE(nearest_chunk, h->map_beg);
+    CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+    CHECK_LE(nearest_chunk, p);
+    if (h->map_beg + h->map_size < p)
+      return 0;
+    return GetUser(h);
+  }
+
+  void PrintStats() {
+    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+           "remains %zd (%zd K) max %zd M\n",
+           stats.n_allocs, stats.n_allocs - stats.n_frees,
+           stats.currently_allocated >> 10, stats.max_allocated >> 20);
   }

  private:
+  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
   struct Header {
     uptr map_beg;
     uptr map_size;
     uptr size;
-    Header *next;
-    Header *prev;
+    uptr chunk_idx;
   };

   Header *GetHeader(uptr p) {

@@ -472,7 +855,11 @@ class LargeMmapAllocator {
   }

   uptr page_size_;
-  Header *list_;
+  Header *chunks_[kMaxNumChunks];
+  uptr n_chunks_;
+  struct Stats {
+    uptr n_allocs, n_frees, currently_allocated, max_allocated;
+  } stats;
   SpinMutex mutex_;
 };
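
The chunks_ array above relies on the classic O(1) unordered-removal idiom,
which is why each Header carries its own chunk_idx back-pointer; the same
step in isolation (Header is the struct above; this is a re-statement, not
the library code):

    // Remove slot idx by moving the last element into the hole.
    void RemoveChunkAt(Header **chunks, uptr *n_chunks, uptr idx) {
      chunks[idx] = chunks[*n_chunks - 1];
      chunks[idx]->chunk_idx = idx;  // keep the back-pointer consistent
      (*n_chunks)--;
    }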

@@ -501,10 +888,14 @@ class CombinedAllocator {
     if (alignment > 8)
       size = RoundUpTo(size, alignment);
     void *res;
-    if (primary_.CanAllocate(size, alignment))
-      res = cache->Allocate(&primary_, primary_.ClassID(size));
-    else
+    if (primary_.CanAllocate(size, alignment)) {
+      if (cache)  // Allocate from cache.
+        res = cache->Allocate(&primary_, primary_.ClassID(size));
+      else  // No thread-local cache, allocate directly from primary allocator.
+        res = primary_.Allocate(size, alignment);
+    } else {  // Secondary allocator does not use cache.
       res = secondary_.Allocate(size, alignment);
+    }
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
     if (cleared && res)

@@ -544,6 +935,10 @@ class CombinedAllocator {
     return secondary_.PointerIsMine(p);
   }

+  bool FromPrimary(void *p) {
+    return primary_.PointerIsMine(p);
+  }
+
   void *GetMetaData(void *p) {
     if (primary_.PointerIsMine(p))
       return primary_.GetMetaData(p);

@@ -572,6 +967,11 @@ class CombinedAllocator {
     cache->Drain(&primary_);
   }

+  void PrintStats() {
+    primary_.PrintStats();
+    secondary_.PrintStats();
+  }
+
  private:
   PrimaryAllocator primary_;
   SecondaryAllocator secondary_;

@@ -22,9 +22,31 @@ extern "C" void _mm_pause();
extern "C" long _InterlockedExchangeAdd( // NOLINT
|
extern "C" long _InterlockedExchangeAdd( // NOLINT
|
||||||
long volatile * Addend, long Value); // NOLINT
|
long volatile * Addend, long Value); // NOLINT
|
||||||
#pragma intrinsic(_InterlockedExchangeAdd)
|
#pragma intrinsic(_InterlockedExchangeAdd)
|
||||||
extern "C" void *InterlockedCompareExchangePointer(
|
|
||||||
|
#ifdef _WIN64
|
||||||
|
extern "C" void *_InterlockedCompareExchangePointer(
|
||||||
void *volatile *Destination,
|
void *volatile *Destination,
|
||||||
void *Exchange, void *Comparand);
|
void *Exchange, void *Comparand);
|
||||||
|
#pragma intrinsic(_InterlockedCompareExchangePointer)
|
||||||
|
#else
|
||||||
|
// There's no _InterlockedCompareExchangePointer intrinsic on x86,
|
||||||
|
// so call _InterlockedCompareExchange instead.
|
||||||
|
extern "C"
|
||||||
|
long __cdecl _InterlockedCompareExchange( // NOLINT
|
||||||
|
long volatile *Destination, // NOLINT
|
||||||
|
long Exchange, long Comparand); // NOLINT
|
||||||
|
#pragma intrinsic(_InterlockedCompareExchange)
|
||||||
|
|
||||||
|
inline static void *_InterlockedCompareExchangePointer(
|
||||||
|
void *volatile *Destination,
|
||||||
|
void *Exchange, void *Comparand) {
|
||||||
|
return reinterpret_cast<void*>(
|
||||||
|
_InterlockedCompareExchange(
|
||||||
|
reinterpret_cast<long volatile*>(Destination), // NOLINT
|
||||||
|
reinterpret_cast<long>(Exchange), // NOLINT
|
||||||
|
reinterpret_cast<long>(Comparand))); // NOLINT
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
namespace __sanitizer {
|
namespace __sanitizer {
|
||||||
|
|
||||||
|
|
@ -113,7 +135,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
|
||||||
uptr xchg,
|
uptr xchg,
|
||||||
memory_order mo) {
|
memory_order mo) {
|
||||||
uptr cmpv = *cmp;
|
uptr cmpv = *cmp;
|
||||||
uptr prev = (uptr)InterlockedCompareExchangePointer(
|
uptr prev = (uptr)_InterlockedCompareExchangePointer(
|
||||||
(void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
|
(void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
|
||||||
if (prev == cmpv)
|
if (prev == cmpv)
|
||||||
return true;
|
return true;
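
A typical caller of this strong compare-exchange is a retry loop. A sketch
(it assumes the elided failure path stores the observed value back into *cmp,
which is the usual contract of this function family):

    uptr AtomicIncrement(volatile atomic_uintptr_t *a) {
      uptr cur = atomic_load(a, memory_order_relaxed);
      while (!atomic_compare_exchange_strong(a, &cur, cur + 1,
                                             memory_order_acq_rel)) {
        // cur has been refreshed with the current value; retry.
      }
      return cur + 1;
    }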

@@ -153,6 +153,27 @@ void SortArray(uptr *array, uptr size) {
   }
 }

+// We want to map a chunk of address space aligned to 'alignment'.
+// We do it by mapping a bit more and then unmapping redundant pieces.
+// We probably can do it with fewer syscalls in some OS-dependent way.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+  // uptr PageSize = GetPageSizeCached();
+  CHECK(IsPowerOfTwo(size));
+  CHECK(IsPowerOfTwo(alignment));
+  uptr map_size = size + alignment;
+  uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+  uptr map_end = map_res + map_size;
+  uptr res = map_res;
+  if (res & (alignment - 1))  // Not aligned.
+    res = (map_res + alignment) & ~(alignment - 1);
+  uptr end = res + size;
+  if (res != map_res)
+    UnmapOrDie((void*)map_res, res - map_res);
+  if (end != map_end)
+    UnmapOrDie((void*)end, map_end - end);
+  return (void*)res;
+}
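
A worked instance of the trimming above, with size = alignment = 64K and an
mmap result that is page-aligned but not 64K-aligned (the addresses are made
up for illustration):

    // map_res = 0x12345000, map_size = 128K, map_end = 0x12365000
    // res     = (0x12345000 + 0x10000) & ~0xFFFF = 0x12350000
    // end     = res + 0x10000                    = 0x12360000
    // unmap [0x12345000, 0x12350000)   // leading slack
    // unmap [0x12360000, 0x12365000)   // trailing slack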

 }  // namespace __sanitizer

 using namespace __sanitizer;  // NOLINT

@@ -178,4 +199,9 @@ void __sanitizer_set_report_fd(int fd) {
     internal_close(report_fd);
   report_fd = fd;
 }
+
+void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) {
+  (void)reserved;
+  PrepareForSandboxing();
+}
 }  // extern "C"

@@ -42,9 +42,13 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
 void *MmapOrDie(uptr size, const char *mem_type);
 void UnmapOrDie(void *addr, uptr size);
 void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
+void *MmapFixedOrDie(uptr fixed_addr, uptr size);
 void *Mprotect(uptr fixed_addr, uptr size);
+// Map aligned chunk of address space; size and alignment are powers of two.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
 // Used to check if we can map shadow memory to a fixed location.
 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
+void FlushUnneededShadowMemory(uptr addr, uptr size);

 // Internal allocator
 void *InternalAlloc(uptr size);

@@ -119,6 +123,7 @@ const char *GetPwd();
 void ReExec();
 bool StackSizeIsUnlimited();
 void SetStackSizeLimitInBytes(uptr limit);
+void PrepareForSandboxing();

 // Other
 void SleepForSeconds(int seconds);

@@ -133,6 +138,13 @@ void NORETURN Die();
 void NORETURN SANITIZER_INTERFACE_ATTRIBUTE
 CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);

+// Set the name of the current thread to 'name', return true on success.
+// The name may be truncated to a system-dependent limit.
+bool SanitizerSetThreadName(const char *name);
+// Get the name of the current thread (no more than max_len bytes),
+// return true on success. name should have space for at least max_len+1 bytes.
+bool SanitizerGetThreadName(char *name, int max_len);
+
 // Specific tools may override behavior of "Die" and "CheckFailed" functions
 // to do tool-specific job.
 void SetDieCallback(void (*callback)(void));

@@ -148,6 +160,12 @@ INLINE uptr RoundUpTo(uptr size, uptr boundary) {
   CHECK(IsPowerOfTwo(boundary));
   return (size + boundary - 1) & ~(boundary - 1);
 }
+INLINE uptr RoundDownTo(uptr x, uptr boundary) {
+  return x & ~(boundary - 1);
+}
+INLINE bool IsAligned(uptr a, uptr alignment) {
+  return (a & (alignment - 1)) == 0;
+}
 // Don't use std::min, std::max or std::swap, to minimize dependency
 // on libstdc++.
 template<class T> T Min(T a, T b) { return a < b ? a : b; }

@@ -0,0 +1,77 @@
+//===-- sanitizer_common_interceptors.h -------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_COMMON_INTERCEPTORS_H
+#define SANITIZER_COMMON_INTERCEPTORS_H
+
+#include "interception/interception.h"
+#include "sanitizer_platform_interceptors.h"
+
+#if SANITIZER_INTERCEPT_READ
+INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
+  COMMON_INTERCEPTOR_ENTER(read, fd, ptr, count);
+  SSIZE_T res = REAL(read)(fd, ptr, count);
+  if (res > 0)
+    COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
+  return res;
+}
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD
+INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+  COMMON_INTERCEPTOR_ENTER(pread, fd, ptr, count, offset);
+  SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
+  if (res > 0)
+    COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
+  return res;
+}
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD64
+INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
+  COMMON_INTERCEPTOR_ENTER(pread64, fd, ptr, count, offset);
+  SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
+  if (res > 0)
+    COMMON_INTERCEPTOR_WRITE_RANGE(ptr, res);
+  return res;
+}
+#endif
+
+#if SANITIZER_INTERCEPT_READ
+# define INIT_READ INTERCEPT_FUNCTION(read)
+#else
+# define INIT_READ
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD
+# define INIT_PREAD INTERCEPT_FUNCTION(pread)
+#else
+# define INIT_PREAD
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD64
+# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64)
+#else
+# define INIT_PREAD64
+#endif
+
+#define SANITIZER_COMMON_INTERCEPTORS_INIT \
+  INIT_READ;                               \
+  INIT_PREAD;                              \
+  INIT_PREAD64;                            \
+
+#endif  // SANITIZER_COMMON_INTERCEPTORS_H
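
A sketch of how a tool is expected to consume this header: it defines the
three macros first and then includes the file (the hook bodies below are
hypothetical stubs, named only for illustration):

    #define COMMON_INTERCEPTOR_ENTER(func, ...) \
      do { /* e.g. make sure the tool is initialized */ } while (0)
    #define COMMON_INTERCEPTOR_READ_RANGE(ptr, size) \
      ToolCheckReadable(ptr, size)   // hypothetical tool hook
    #define COMMON_INTERCEPTOR_WRITE_RANGE(ptr, size) \
      ToolCheckWritable(ptr, size)   // hypothetical tool hook
    #include "sanitizer_common/sanitizer_common_interceptors.h"
    // ...and during tool startup:
    //   SANITIZER_COMMON_INTERCEPTORS_INIT;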

@@ -203,4 +203,23 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
   }
 }

+bool mem_is_zero(const char *beg, uptr size) {
+  CHECK_LE(size, 1UL << FIRST_32_SECOND_64(30, 40));  // Sanity check.
+  const char *end = beg + size;
+  uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));
+  uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));
+  uptr all = 0;
+  // Prologue.
+  for (const char *mem = beg; mem < (char*)aligned_beg && mem < end; mem++)
+    all |= *mem;
+  // Aligned loop.
+  for (; aligned_beg < aligned_end; aligned_beg++)
+    all |= *aligned_beg;
+  // Epilogue.
+  if ((char*)aligned_end >= beg)
+    for (const char *mem = (char*)aligned_end; mem < end; mem++)
+      all |= *mem;
+  return all == 0;
+}
+
 }  // namespace __sanitizer
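
Usage sketch for mem_is_zero, exercising all three phases of the scan
(unaligned prologue, word-wide loop, epilogue):

    char buf[37] = {};                     // zero, deliberately odd-sized
    CHECK(mem_is_zero(buf, sizeof(buf)));
    buf[36] = 1;                           // dirty the last (epilogue) byte
    CHECK(!mem_is_zero(buf, sizeof(buf)));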

@@ -45,6 +45,11 @@ char *internal_strstr(const char *haystack, const char *needle);
 // Works only for base=10 and doesn't set errno.
 s64 internal_simple_strtoll(const char *nptr, char **endptr, int base);

+// Return true if all bytes in [mem, mem+size) are zero.
+// Optimized for the case when the result is true.
+bool mem_is_zero(const char *mem, uptr size);
+
 // Memory
 void *internal_mmap(void *addr, uptr length, int prot, int flags,
                     int fd, u64 offset);

@@ -17,6 +17,7 @@
#include "sanitizer_mutex.h"
|
#include "sanitizer_mutex.h"
|
||||||
#include "sanitizer_placement_new.h"
|
#include "sanitizer_placement_new.h"
|
||||||
#include "sanitizer_procmaps.h"
|
#include "sanitizer_procmaps.h"
|
||||||
|
#include "sanitizer_stacktrace.h"
|
||||||
|
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
#include <pthread.h>
|
#include <pthread.h>
|
||||||
|
|
@ -28,7 +29,9 @@
|
||||||
#include <sys/time.h>
|
#include <sys/time.h>
|
||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
|
#include <unwind.h>
|
||||||
#include <errno.h>
|
#include <errno.h>
|
||||||
|
#include <sys/prctl.h>
|
||||||
|
|
||||||
// Are we using 32-bit or 64-bit syscalls?
|
// Are we using 32-bit or 64-bit syscalls?
|
||||||
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
|
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
|
||||||
|
|
@ -215,6 +218,14 @@ void ReExec() {
|
||||||
execv(argv[0], argv.data());
|
execv(argv[0], argv.data());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void PrepareForSandboxing() {
|
||||||
|
// Some kinds of sandboxes may forbid filesystem access, so we won't be able
|
||||||
|
// to read the file mappings from /proc/self/maps. Luckily, neither the
|
||||||
|
// process will be able to load additional libraries, so it's fine to use the
|
||||||
|
// cached mappings.
|
||||||
|
MemoryMappingLayout::CacheMemoryMappings();
|
||||||
|
}
|
||||||
|
|
||||||
// ----------------- sanitizer_procmaps.h
|
// ----------------- sanitizer_procmaps.h
|
||||||
// Linker initialized.
|
// Linker initialized.
|
||||||
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
|
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
|
||||||
|
|
@ -354,6 +365,75 @@ bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
|
||||||
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
|
return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool SanitizerSetThreadName(const char *name) {
|
||||||
|
return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT
|
||||||
|
}
|
||||||
|
|
||||||
|
bool SanitizerGetThreadName(char *name, int max_len) {
|
||||||
|
char buff[17];
|
||||||
|
if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT
|
||||||
|
return false;
|
||||||
|
internal_strncpy(name, buff, max_len);
|
||||||
|
name[max_len] = 0;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifndef SANITIZER_GO
|
||||||
|
//------------------------- SlowUnwindStack -----------------------------------
|
||||||
|
#ifdef __arm__
|
||||||
|
#define UNWIND_STOP _URC_END_OF_STACK
|
||||||
|
#define UNWIND_CONTINUE _URC_NO_REASON
|
||||||
|
#else
|
||||||
|
#define UNWIND_STOP _URC_NORMAL_STOP
|
||||||
|
#define UNWIND_CONTINUE _URC_NO_REASON
|
||||||
|
#endif
|
||||||
|
|
||||||
|
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
|
||||||
|
#ifdef __arm__
|
||||||
|
uptr val;
|
||||||
|
_Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
|
||||||
|
15 /* r15 = PC */, _UVRSD_UINT32, &val);
|
||||||
|
CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
|
||||||
|
// Clear the Thumb bit.
|
||||||
|
return val & ~(uptr)1;
|
||||||
|
#else
|
||||||
|
return _Unwind_GetIP(ctx);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
|
||||||
|
StackTrace *b = (StackTrace*)param;
|
||||||
|
CHECK(b->size < b->max_size);
|
||||||
|
uptr pc = Unwind_GetIP(ctx);
|
||||||
|
b->trace[b->size++] = pc;
|
||||||
|
if (b->size == b->max_size) return UNWIND_STOP;
|
||||||
|
return UNWIND_CONTINUE;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool MatchPc(uptr cur_pc, uptr trace_pc) {
|
||||||
|
return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
|
||||||
|
}
|
||||||
|
|
||||||
|
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
|
||||||
|
this->size = 0;
|
||||||
|
this->max_size = max_depth;
|
||||||
|
if (max_depth > 1) {
|
||||||
|
_Unwind_Backtrace(Unwind_Trace, this);
|
||||||
|
// We need to pop a few frames so that pc is on top.
|
||||||
|
// trace[0] belongs to the current function so we always pop it.
|
||||||
|
int to_pop = 1;
|
||||||
|
/**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
|
||||||
|
else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
|
||||||
|
else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
|
||||||
|
else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
|
||||||
|
else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
|
||||||
|
this->PopStackFrames(to_pop);
|
||||||
|
}
|
||||||
|
this->trace[0] = pc;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // #ifndef SANITIZER_GO
|
||||||
|
|
||||||
} // namespace __sanitizer
|
} // namespace __sanitizer
|
||||||
|
|
||||||
#endif // __linux__
|
#endif // __linux__
|
||||||
|
|
|
||||||
|
|
@ -124,6 +124,10 @@ void ReExec() {
|
||||||
   UNIMPLEMENTED();
 }

+void PrepareForSandboxing() {
+  // Nothing here for now.
+}
+
 // ----------------- sanitizer_procmaps.h

 MemoryMappingLayout::MemoryMappingLayout() {

@@ -0,0 +1,27 @@
//===-- sanitizer_platform_interceptors.h -----------------------*- C++ -*-===//
|
||||||
|
//
|
||||||
|
// This file is distributed under the University of Illinois Open Source
|
||||||
|
// License. See LICENSE.TXT for details.
|
||||||
|
//
|
||||||
|
//===----------------------------------------------------------------------===//
|
||||||
|
//
|
||||||
|
// This file defines macro telling whether sanitizer tools can/should intercept
|
||||||
|
// given library functions on a given platform.
|
||||||
|
//
|
||||||
|
//===----------------------------------------------------------------------===//
|
||||||
|
|
||||||
|
#include "sanitizer_internal_defs.h"
|
||||||
|
|
||||||
|
#if !defined(_WIN32)
|
||||||
|
# define SANITIZER_INTERCEPT_READ 1
|
||||||
|
# define SANITIZER_INTERCEPT_PREAD 1
|
||||||
|
#else
|
||||||
|
# define SANITIZER_INTERCEPT_READ 0
|
||||||
|
# define SANITIZER_INTERCEPT_PREAD 0
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(__linux__) && !defined(ANDROID)
|
||||||
|
# define SANITIZER_INTERCEPT_PREAD64 1
|
||||||
|
#else
|
||||||
|
# define SANITIZER_INTERCEPT_PREAD64 0
|
||||||
|
#endif
|
||||||
|
|
@@ -91,6 +91,21 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
   return p;
 }
 
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+  uptr PageSize = GetPageSizeCached();
+  void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+      RoundUpTo(size, PageSize),
+      PROT_READ | PROT_WRITE,
+      MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+      -1, 0);
+  if (p == (void*)-1) {
+    Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
+           size, size, fixed_addr, errno);
+    CHECK("unable to mmap" && 0);
+  }
+  return p;
+}
+
 void *Mprotect(uptr fixed_addr, uptr size) {
   return internal_mmap((void*)fixed_addr, size,
                        PROT_NONE,
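
For context, internal_mmap is a thin wrapper over the mmap(2) syscall, so
MmapFixedOrDie leans on the MAP_FIXED contract: the address must be
page-aligned (hence the fixed_addr & ~(PageSize - 1) rounding above) and any
existing mapping in the range is silently replaced. A sketch with the plain
libc call; the address constant is an arbitrary example, not anything the
runtime uses:

    #include <sys/mman.h>

    /* Map 1 MiB read-write at a fixed, page-aligned address. */
    void *p = mmap((void *)0x600000000000, 1 << 20,
                   PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    /* On failure mmap returns MAP_FAILED, i.e. (void *)-1,
       which is exactly what the check above tests for. */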
@@ -98,6 +113,10 @@ void *Mprotect(uptr fixed_addr, uptr size) {
                        -1, 0);
 }
 
+void FlushUnneededShadowMemory(uptr addr, uptr size) {
+  madvise((void*)addr, size, MADV_DONTNEED);
+}
+
 void *MapFileToMemory(const char *file_name, uptr *buff_size) {
   fd_t fd = internal_open(file_name, false);
   CHECK_NE(fd, kInvalidFd);
@@ -92,7 +92,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
 int VSNPrintf(char *buff, int buff_length,
               const char *format, va_list args) {
   static const char *kPrintfFormatsHelp =
-    "Supported Printf formats: %%(0[0-9]*)?(z|ll)?{d,u,x}; %%p; %%s; %%c\n";
+    "Supported Printf formats: %(0[0-9]*)?(z|ll)?{d,u,x}; %p; %s; %c\n";
   RAW_CHECK(format);
   RAW_CHECK(buff_length > 0);
   const char *buff_end = &buff[buff_length - 1];
@@ -0,0 +1,35 @@
+//===-- sanitizer_report_decorator.h ----------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Tags to decorate the sanitizer reports.
+// Currently supported tags:
+//   * None.
+//   * ANSI color sequences.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_REPORT_DECORATOR_H
+#define SANITIZER_REPORT_DECORATOR_H
+
+namespace __sanitizer {
+class AnsiColorDecorator {
+ public:
+  explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { }
+  const char *Black()   { return ansi_ ? "\033[1m\033[30m" : ""; }
+  const char *Red()     { return ansi_ ? "\033[1m\033[31m" : ""; }
+  const char *Green()   { return ansi_ ? "\033[1m\033[32m" : ""; }
+  const char *Yellow()  { return ansi_ ? "\033[1m\033[33m" : ""; }
+  const char *Blue()    { return ansi_ ? "\033[1m\033[34m" : ""; }
+  const char *Magenta() { return ansi_ ? "\033[1m\033[35m" : ""; }
+  const char *Cyan()    { return ansi_ ? "\033[1m\033[36m" : ""; }
+  const char *White()   { return ansi_ ? "\033[1m\033[37m" : ""; }
+  const char *Default() { return ansi_ ? "\033[1m\033[0m"  : ""; }
+ private:
+  bool ansi_;
+};
+}  // namespace __sanitizer
+#endif  // SANITIZER_REPORT_DECORATOR_H
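
A minimal usage sketch for the decorator (the driver program is hypothetical,
and the include path assumes the in-tree sanitizer_common layout): each
accessor returns either an ANSI escape sequence or an empty string, so call
sites can embed the tags unconditionally.

    #include <cstdio>
    #include "sanitizer_common/sanitizer_report_decorator.h"

    int main() {
      __sanitizer::AnsiColorDecorator d(/*use_ansi_colors=*/true);
      // With colors disabled, both calls return "" and output stays plain.
      std::printf("%sERROR%s: report body\n", d.Red(), d.Default());
    }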
@@ -40,6 +40,12 @@ static struct {
   atomic_uint32_t seq[kPartCount];  // Unique id generators.
 } depot;
 
+static StackDepotStats stats;
+
+StackDepotStats *StackDepotGetStats() {
+  return &stats;
+}
+
 static u32 hash(const uptr *stack, uptr size) {
   // murmur2
   const u32 m = 0x5bd1e995;
@@ -75,7 +81,7 @@ static StackDesc *tryallocDesc(uptr memsz) {
 }
 
 static StackDesc *allocDesc(uptr size) {
-  // Frist, try to allocate optimisitically.
+  // First, try to allocate optimistically.
   uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
   StackDesc *s = tryallocDesc(memsz);
   if (s)
@@ -91,6 +97,7 @@ static StackDesc *allocDesc(uptr size) {
     if (allocsz < memsz)
       allocsz = memsz;
     uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+    stats.mapped += allocsz;
     atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
     atomic_store(&depot.region_pos, mem, memory_order_release);
   }
@@ -154,6 +161,7 @@ u32 StackDepotPut(const uptr *stack, uptr size) {
   }
   uptr part = (h % kTabSize) / kPartSize;
   id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
+  stats.n_uniq_ids++;
   CHECK_LT(id, kMaxId);
   id |= part << kPartShift;
   CHECK_NE(id, 0);
@@ -22,6 +22,13 @@ u32 StackDepotPut(const uptr *stack, uptr size);
 // Retrieves a stored stack trace by the id.
 const uptr *StackDepotGet(u32 id, uptr *size);
 
+struct StackDepotStats {
+  uptr n_uniq_ids;
+  uptr mapped;
+};
+
+StackDepotStats *StackDepotGetStats();
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_STACKDEPOT_H
@@ -23,10 +23,7 @@ static const char *StripPathPrefix(const char *filepath,
 }
 
 // ----------------------- StackTrace ----------------------------- {{{1
-// PCs in stack traces are actually the return addresses, that is,
-// addresses of the next instructions after the call. That's why we
-// decrement them.
-static uptr patch_pc(uptr pc) {
+uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
 #ifdef __arm__
   // Cancel Thumb bit.
   pc = pc & (~1);
@@ -69,7 +66,9 @@ void StackTrace::PrintStack(const uptr *addr, uptr size,
   InternalScopedBuffer<AddressInfo> addr_frames(64);
   uptr frame_num = 0;
   for (uptr i = 0; i < size && addr[i]; i++) {
-    uptr pc = patch_pc(addr[i]);
+    // PCs in stack traces are actually the return addresses, that is,
+    // addresses of the next instructions after the call.
+    uptr pc = GetPreviousInstructionPc(addr[i]);
     uptr addr_frames_num = 0;  // The number of stack frames for current
                                // instruction address.
     if (symbolize_callback) {
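
The reasoning behind GetPreviousInstructionPc: a recorded PC is a return
address, which points at the instruction after the call, so symbolizing it
unadjusted can blame the wrong source line (or, for a call at the very end of
a function, the wrong function). Any address strictly inside the call
instruction fixes the attribution, and subtracting into the call (plus
cancelling the ARM Thumb bit, as the function above does) is the conventional
choice. A tiny illustration, a sketch rather than part of the patch:

    #include <stdio.h>

    void callee(void) {
      /* Points just past the call instruction in main. */
      void *ra = __builtin_return_address(0);
      printf("return address %p, call site is near %p\n",
             ra, (void *)((char *)ra - 1));
    }

    int main(void) { callee(); return 0; }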
@@ -42,10 +42,12 @@ struct StackTrace {
   }
 
   void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom);
+  void SlowUnwindStack(uptr pc, uptr max_depth);
 
   void PopStackFrames(uptr count);
 
   static uptr GetCurrentPc();
+  static uptr GetPreviousInstructionPc(uptr pc);
 
   static uptr CompressStack(StackTrace *stack,
                             u32 *compressed, uptr size);
@@ -58,6 +58,9 @@ struct AddressInfo {
 uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames);
 bool SymbolizeData(uptr address, AddressInfo *frame);
 
+// Attempts to demangle the provided C++ mangled name.
+const char *Demangle(const char *Name);
+
 // Starts external symbolizer program in a subprocess. Sanitizer communicates
 // with external symbolizer via pipes.
 bool InitializeExternalSymbolizer(const char *path_to_symbolizer);
@@ -0,0 +1,40 @@
+//===-- sanitizer_symbolizer_itanium.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between the sanitizer run-time libraries.
+// Itanium C++ ABI-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+#if defined(__APPLE__) || defined(__linux__)
+
+#include "sanitizer_symbolizer.h"
+
+#include <stdlib.h>
+
+// C++ demangling function, as required by Itanium C++ ABI. This is weak,
+// because we do not require a C++ ABI library to be linked to a program
+// using sanitizers; if it's not present, we'll just use the mangled name.
+namespace __cxxabiv1 {
+  extern "C" char *__cxa_demangle(const char *mangled, char *buffer,
+                                  size_t *length, int *status)
+      SANITIZER_WEAK_ATTRIBUTE;
+}
+
+const char *__sanitizer::Demangle(const char *MangledName) {
+  // FIXME: __cxa_demangle aggressively insists on allocating memory.
+  // There's not much we can do about that, short of providing our
+  // own demangler (libc++abi's implementation could be adapted so that
+  // it does not allocate). For now, we just call it anyway, and we leak
+  // the returned value.
+  if (__cxxabiv1::__cxa_demangle)
+    if (const char *Demangled =
+          __cxxabiv1::__cxa_demangle(MangledName, 0, 0, 0))
+      return Demangled;
+  return MangledName;
+}
+
+#endif  // __APPLE__ || __linux__
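
The weak __cxa_demangle hook above binds to whichever C++ ABI library the
program links, if any. Outside the sanitizer runtime the same entry point is
reachable through <cxxabi.h>; a sketch, with an arbitrary mangled name:

    #include <cxxabi.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      int status = 0;
      // Returns a malloc'ed string on success (status == 0).
      char *s = abi::__cxa_demangle("_ZN7example3fooEv", 0, 0, &status);
      std::printf("%s\n", status == 0 ? s : "(demangling failed)");
      std::free(s);
    }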
@@ -26,6 +26,10 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules) {
   UNIMPLEMENTED();
 };
 
+const char *Demangle(const char *MangledName) {
+  return MangledName;
+}
+
 }  // namespace __sanitizer
 
 #endif  // _WIN32
@@ -13,6 +13,7 @@
 #define WIN32_LEAN_AND_MEAN
 #define NOGDI
 #include <stdlib.h>
+#include <io.h>
 #include <windows.h>
 
 #include "sanitizer_common.h"
@@ -73,6 +74,8 @@ void UnmapOrDie(void *addr, uptr size) {
 }
 
 void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
+  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
+  // but on Win64 it does.
   void *p = VirtualAlloc((LPVOID)fixed_addr, size,
       MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
   if (p == 0)
@@ -81,6 +84,10 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
   return p;
 }
 
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+  return MmapFixedNoReserve(fixed_addr, size);
+}
+
 void *Mprotect(uptr fixed_addr, uptr size) {
   return VirtualAlloc((LPVOID)fixed_addr, size,
       MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
@@ -127,6 +134,10 @@ void ReExec() {
   UNIMPLEMENTED();
 }
 
+void PrepareForSandboxing() {
+  // Nothing here for now.
+}
+
 bool StackSizeIsUnlimited() {
   UNIMPLEMENTED();
 }
@@ -173,7 +184,7 @@ int internal_close(fd_t fd) {
 }
 
 int internal_isatty(fd_t fd) {
-  UNIMPLEMENTED();
+  return _isatty(fd);
 }
 
 fd_t internal_open(const char *filename, bool write) {
@@ -31,6 +31,9 @@ tsan_files = \
         tsan_interface_ann.cc \
         tsan_mman.cc \
         tsan_rtl_report.cc \
+        tsan_fd.cc \
+        tsan_interface_java.cc \
+        tsan_mutexset.cc \
         tsan_symbolize_addr2line_linux.cc
 
 libtsan_la_SOURCES = $(tsan_files)
@@ -87,7 +87,8 @@ am__objects_1 = tsan_clock.lo tsan_interface_atomic.lo tsan_mutex.lo \
         tsan_rtl.lo tsan_stat.lo tsan_sync.lo tsan_interceptors.lo \
         tsan_md5.lo tsan_platform_mac.lo tsan_rtl_mutex.lo \
         tsan_suppressions.lo tsan_interface_ann.lo tsan_mman.lo \
-        tsan_rtl_report.lo tsan_symbolize_addr2line_linux.lo
+        tsan_rtl_report.lo tsan_fd.lo tsan_interface_java.lo \
+        tsan_mutexset.lo tsan_symbolize_addr2line_linux.lo
 am_libtsan_la_OBJECTS = $(am__objects_1)
 libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
 libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -273,6 +274,9 @@ tsan_files = \
         tsan_interface_ann.cc \
         tsan_mman.cc \
         tsan_rtl_report.cc \
+        tsan_fd.cc \
+        tsan_interface_java.cc \
+        tsan_mutexset.cc \
         tsan_symbolize_addr2line_linux.cc
 
 libtsan_la_SOURCES = $(tsan_files)
@@ -393,14 +397,17 @@ distclean-compile:
         -rm -f *.tab.c
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_clock.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_fd.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_flags.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interceptors.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_ann.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_atomic.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_java.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_md5.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mman.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutex.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutexset.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_linux.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_mac.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_report.Plo@am__quote@
@@ -137,6 +137,12 @@ T RoundDown(T p, u64 align) {
   return (T)((u64)p & ~(align - 1));
 }
 
+// Zeroizes high part, returns 'bits' lsb bits.
+template<typename T>
+T GetLsb(T v, int bits) {
+  return (T)((u64)v & ((1ull << bits) - 1));
+}
+
 struct MD5Hash {
   u64 hash[2];
   bool operator==(const MD5Hash &other) const;
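
For concreteness, GetLsb<u64>(0xABCD, 8) yields 0xCD and GetLsb<u64>(0xABCD, 4)
yields 0xD. Note that bits must stay below 64: (1ull << 64) is undefined
behavior in C++, so the helper assumes callers never ask for the full word.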
@@ -0,0 +1,257 @@
+//===-- tsan_fd.cc --------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_fd.h"
+#include "tsan_rtl.h"
+#include <sanitizer_common/sanitizer_atomic.h>
+
+namespace __tsan {
+
+const int kTableSizeL1 = 1024;
+const int kTableSizeL2 = 1024;
+const int kTableSize = kTableSizeL1 * kTableSizeL2;
+
+struct FdSync {
+  atomic_uint64_t rc;
+};
+
+struct FdDesc {
+  FdSync *sync;
+  int creation_tid;
+  u32 creation_stack;
+};
+
+struct FdContext {
+  atomic_uintptr_t tab[kTableSizeL1];
+  // Addresses used for synchronization.
+  FdSync globsync;
+  FdSync filesync;
+  FdSync socksync;
+  u64 connectsync;
+};
+
+static FdContext fdctx;
+
+static FdSync *allocsync() {
+  FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
+  atomic_store(&s->rc, 1, memory_order_relaxed);
+  return s;
+}
+
+static FdSync *ref(FdSync *s) {
+  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
+    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
+  return s;
+}
+
+static void unref(ThreadState *thr, uptr pc, FdSync *s) {
+  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
+    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
+      CHECK_NE(s, &fdctx.globsync);
+      CHECK_NE(s, &fdctx.filesync);
+      CHECK_NE(s, &fdctx.socksync);
+      SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
+      if (v)
+        DestroyAndFree(v);
+      internal_free(s);
+    }
+  }
+}
+
+static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
+  CHECK_LT(fd, kTableSize);
+  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
+  uptr l1 = atomic_load(pl1, memory_order_consume);
+  if (l1 == 0) {
+    uptr size = kTableSizeL2 * sizeof(FdDesc);
+    void *p = internal_alloc(MBlockFD, size);
+    internal_memset(p, 0, size);
+    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
+    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
+      l1 = (uptr)p;
+    else
+      internal_free(p);
+  }
+  return &((FdDesc*)l1)[fd % kTableSizeL2];  // NOLINT
+}
+
+// pd must be already ref'ed.
+static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
+  FdDesc *d = fddesc(thr, pc, fd);
+  // As a matter of fact, we don't intercept all close calls.
+  // See e.g. libc __res_iclose().
+  if (d->sync) {
+    unref(thr, pc, d->sync);
+    d->sync = 0;
+  }
+  if (flags()->io_sync == 0) {
+    unref(thr, pc, s);
+  } else if (flags()->io_sync == 1) {
+    d->sync = s;
+  } else if (flags()->io_sync == 2) {
+    unref(thr, pc, s);
+    d->sync = &fdctx.globsync;
+  }
+  d->creation_tid = thr->tid;
+  d->creation_stack = CurrentStackId(thr, pc);
+  // To catch races between fd usage and open.
+  MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
+}
+
+void FdInit() {
+  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
+  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
+  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
+}
+
+void FdOnFork(ThreadState *thr, uptr pc) {
+  // On fork() we need to reset all fd's, because the child is going
+  // to close all of them, and that will cause races between previous
+  // read/write and the close.
+  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+    if (tab == 0)
+      break;
+    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
+      FdDesc *d = &tab[l2];
+      MemoryResetRange(thr, pc, (uptr)d, 8);
+    }
+  }
+}
+
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
+  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+    if (tab == 0)
+      break;
+    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
+      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
+      FdDesc *d = &tab[l2];
+      *fd = l1 * kTableSizeL1 + l2;
+      *tid = d->creation_tid;
+      *stack = d->creation_stack;
+      return true;
+    }
+  }
+  return false;
+}
+
+void FdAcquire(ThreadState *thr, uptr pc, int fd) {
+  FdDesc *d = fddesc(thr, pc, fd);
+  FdSync *s = d->sync;
+  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
+  MemoryRead8Byte(thr, pc, (uptr)d);
+  if (s)
+    Acquire(thr, pc, (uptr)s);
+}
+
+void FdRelease(ThreadState *thr, uptr pc, int fd) {
+  FdDesc *d = fddesc(thr, pc, fd);
+  FdSync *s = d->sync;
+  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
+  if (s)
+    Release(thr, pc, (uptr)s);
+  MemoryRead8Byte(thr, pc, (uptr)d);
+}
+
+void FdClose(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
+  FdDesc *d = fddesc(thr, pc, fd);
+  // To catch races between fd usage and close.
+  MemoryWrite8Byte(thr, pc, (uptr)d);
+  // We need to clear it, because if we do not intercept any call out there
+  // that creates fd, we will hit false positives.
+  MemoryResetRange(thr, pc, (uptr)d, 8);
+  unref(thr, pc, d->sync);
+  d->sync = 0;
+  d->creation_tid = 0;
+  d->creation_stack = 0;
+}
+
+void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
+  init(thr, pc, fd, &fdctx.filesync);
+}
+
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
+  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
+  // Ignore the case when user dups not yet connected socket.
+  FdDesc *od = fddesc(thr, pc, oldfd);
+  MemoryRead8Byte(thr, pc, (uptr)od);
+  FdClose(thr, pc, newfd);
+  init(thr, pc, newfd, ref(od->sync));
+}
+
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
+  DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
+  FdSync *s = allocsync();
+  init(thr, pc, rfd, ref(s));
+  init(thr, pc, wfd, ref(s));
+  unref(thr, pc, s);
+}
+
+void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
+  init(thr, pc, fd, allocsync());
+}
+
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
+  init(thr, pc, fd, 0);
+}
+
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
+  init(thr, pc, fd, 0);
+}
+
+void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
+  init(thr, pc, fd, allocsync());
+}
+
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
+  // It can be a UDP socket.
+  init(thr, pc, fd, &fdctx.socksync);
+}
+
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
+  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
+  // Synchronize connect->accept.
+  Acquire(thr, pc, (uptr)&fdctx.connectsync);
+  init(thr, pc, newfd, &fdctx.socksync);
+}
+
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
+  // Synchronize connect->accept.
+  Release(thr, pc, (uptr)&fdctx.connectsync);
+}
+
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
+  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
+  init(thr, pc, fd, &fdctx.socksync);
+}
+
+uptr File2addr(char *path) {
+  (void)path;
+  static u64 addr;
+  return (uptr)&addr;
+}
+
+uptr Dir2addr(char *path) {
+  (void)path;
+  static u64 addr;
+  return (uptr)&addr;
+}
+
+}  // namespace __tsan
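
The fd table above is a lazily populated two-level array: the 1024-entry
first level is static, and each 1024-entry second-level block is allocated on
first touch and published with a compare-and-swap, so lookups never take a
lock. A self-contained sketch of the same scheme in portable C++ (types and
names are illustrative):

    #include <atomic>

    constexpr int kL1 = 1024, kL2 = 1024;
    struct Desc { int creation_tid; };

    static std::atomic<Desc *> table[kL1];

    Desc *lookup(int fd) {
      std::atomic<Desc *> &slot = table[fd / kL2];
      Desc *l2 = slot.load(std::memory_order_acquire);
      if (!l2) {
        Desc *fresh = new Desc[kL2]();  // zero-initialized block
        // Publish the block, or discard it if another thread won the race.
        if (slot.compare_exchange_strong(l2, fresh, std::memory_order_acq_rel))
          l2 = fresh;
        else
          delete[] fresh;
      }
      return &l2[fd % kL2];
    }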
@@ -0,0 +1,62 @@
+//===-- tsan_fd.h -----------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// This file handles synchronization via IO.
+// People use IO for synchronization along the lines of:
+//
+// int X;
+// int client_socket;  // initialized elsewhere
+// int server_socket;  // initialized elsewhere
+//
+// Thread 1:
+// X = 42;
+// send(client_socket, ...);
+//
+// Thread 2:
+// if (recv(server_socket, ...) > 0)
+//   assert(X == 42);
+//
+// This file determines the scope of the file descriptor (pipe, socket,
+// all local files, etc) and executes acquire and release operations on
+// the scope as necessary. Some scopes are very fine grained (e.g. pipe
+// operations synchronize only with operations on the same pipe), while
+// others are coarse-grained (e.g. all operations on local files synchronize
+// with each other).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FD_H
+#define TSAN_FD_H
+
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void FdInit();
+void FdAcquire(ThreadState *thr, uptr pc, int fd);
+void FdRelease(ThreadState *thr, uptr pc, int fd);
+void FdClose(ThreadState *thr, uptr pc, int fd);
+void FdFileCreate(ThreadState *thr, uptr pc, int fd);
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd);
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd);
+void FdEventCreate(ThreadState *thr, uptr pc, int fd);
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
+void FdPollCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
+void FdOnFork(ThreadState *thr, uptr pc);
+
+uptr File2addr(char *path);
+uptr Dir2addr(char *path);
+
+}  // namespace __tsan
+
+#endif  // TSAN_FD_H
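
To make the header comment's scenario concrete, here is a complete program in
the pattern this machinery legitimizes: the write() acts as a release on the
pipe's FdSync and the read() as an acquire on the same object, so with
io_sync >= 1 TSan treats the accesses to X as ordered. A sketch, with error
handling omitted:

    #include <cassert>
    #include <pthread.h>
    #include <unistd.h>

    int X;
    int pipefd[2];

    void *producer(void *) {
      X = 42;                   // happens before the write() below
      char c = 0;
      write(pipefd[1], &c, 1);  // FdRelease on the pipe's sync object
      return 0;
    }

    int main() {
      pipe(pipefd);
      pthread_t t;
      pthread_create(&t, 0, producer, 0);
      char c;
      if (read(pipefd[0], &c, 1) == 1)  // FdAcquire on the same object
        assert(X == 42);                // ordered, not reported as a race
      pthread_join(t, 0);
    }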
@@ -56,6 +56,7 @@ void InitializeFlags(Flags *f, const char *env) {
   f->running_on_valgrind = false;
   f->external_symbolizer_path = "";
   f->history_size = kGoMode ? 1 : 2;  // There are a lot of goroutines in Go.
+  f->io_sync = 1;
 
   // Let a frontend override.
   OverrideFlags(f);
@@ -81,6 +82,7 @@ void InitializeFlags(Flags *f, const char *env) {
   ParseFlag(env, &f->stop_on_start, "stop_on_start");
   ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path");
   ParseFlag(env, &f->history_size, "history_size");
+  ParseFlag(env, &f->io_sync, "io_sync");
 
   if (!f->report_bugs) {
     f->report_thread_leaks = false;
@@ -93,6 +95,12 @@ void InitializeFlags(Flags *f, const char *env) {
            " (must be [0..7])\n");
     Die();
   }
+
+  if (f->io_sync < 0 || f->io_sync > 2) {
+    Printf("ThreadSanitizer: incorrect value for io_sync"
+           " (must be [0..2])\n");
+    Die();
+  }
 }
 
 }  // namespace __tsan
@@ -75,6 +75,11 @@ struct Flags {
   // the amount of memory accesses, up to history_size=7 that amounts to
   // 4M memory accesses. The default value is 2 (128K memory accesses).
   int history_size;
+  // Controls the level of synchronization implied by IO operations.
+  // 0 - no synchronization
+  // 1 - reasonable level of synchronization (write->read)
+  // 2 - global synchronization of all IO operations
+  int io_sync;
 };
 
 Flags *flags();
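
Since InitializeFlags parses these values from the environment, the new knob
is presumably set the same way as the existing ones, e.g.
TSAN_OPTIONS=io_sync=2 ./app for global IO synchronization, or io_sync=0 to
make IO imply no synchronization at all; 1, the default set in
InitializeFlags, only orders a read against the writes on the same
descriptor's scope.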
@@ -7,6 +7,8 @@
 //
 // This file is a part of ThreadSanitizer (TSan), a race detector.
 //
+// FIXME: move as many interceptors as possible into
+// sanitizer_common/sanitizer_common_interceptors.h
 //===----------------------------------------------------------------------===//
 
 #include "sanitizer_common/sanitizer_atomic.h"
@@ -18,6 +20,7 @@
 #include "tsan_platform.h"
 #include "tsan_rtl.h"
 #include "tsan_mman.h"
+#include "tsan_fd.h"
 
 using namespace __tsan;  // NOLINT
@@ -50,6 +53,7 @@ extern "C" void *pthread_self();
 extern "C" void _exit(int status);
 extern "C" int __cxa_atexit(void (*func)(void *arg), void *arg, void *dso);
 extern "C" int *__errno_location();
+extern "C" int fileno_unlocked(void *stream);
 const int PTHREAD_MUTEX_RECURSIVE = 1;
 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
 const int kPthreadAttrSize = 56;
@@ -124,10 +128,8 @@ static SignalContext *SigCtx(ThreadState *thr) {
   SignalContext *ctx = (SignalContext*)thr->signal_ctx;
   if (ctx == 0 && thr->is_alive) {
     ScopedInRtl in_rtl;
-    ctx = (SignalContext*)internal_alloc(
-        MBlockSignal, sizeof(*ctx));
-    MemoryResetRange(thr, 0, (uptr)ctx, sizeof(*ctx));
-    internal_memset(ctx, 0, sizeof(*ctx));
+    ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext");
+    MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
     thr->signal_ctx = ctx;
   }
   return ctx;
@@ -173,8 +175,8 @@ ScopedInterceptor::~ScopedInterceptor() {
     StatInc(thr, StatInt_##func); \
     const uptr caller_pc = GET_CALLER_PC(); \
     ScopedInterceptor si(thr, #func, caller_pc); \
-    /* Subtract one from pc as we need current instruction address */ \
-    const uptr pc = __sanitizer::StackTrace::GetCurrentPc() - 1; \
+    const uptr pc = __sanitizer::StackTrace::GetPreviousInstructionPc( \
+        __sanitizer::StackTrace::GetCurrentPc()); \
     (void)pc; \
 /**/
@@ -306,30 +308,6 @@ TSAN_INTERCEPTOR(void, siglongjmp, void *env, int val) {
   Die();
 }
 
-static uptr fd2addr(int fd) {
-  (void)fd;
-  static u64 addr;
-  return (uptr)&addr;
-}
-
-static uptr epollfd2addr(int fd) {
-  (void)fd;
-  static u64 addr;
-  return (uptr)&addr;
-}
-
-static uptr file2addr(char *path) {
-  (void)path;
-  static u64 addr;
-  return (uptr)&addr;
-}
-
-static uptr dir2addr(char *path) {
-  (void)path;
-  static u64 addr;
-  return (uptr)&addr;
-}
-
 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
   void *p = 0;
   {
@@ -660,7 +638,7 @@ static void thread_finalize(void *v) {
     SignalContext *sctx = thr->signal_ctx;
     if (sctx) {
       thr->signal_ctx = 0;
-      internal_free(sctx);
+      UnmapOrDie(sctx, sizeof(*sctx));
     }
   }
 }
@@ -934,11 +912,15 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
   return res;
 }
 
+// libpthread.so contains several versions of pthread_cond_init symbol.
+// When we just dlsym() it, we get the wrong (old) version.
+/*
 TSAN_INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, c, a);
   int res = REAL(pthread_cond_init)(c, a);
   return res;
 }
+*/
 
 TSAN_INTERCEPTOR(int, pthread_cond_destroy, void *c) {
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, c);
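
On the versioning pitfall the comment describes: glibc ships several versions
of pthread_cond_init, and a plain dlsym(RTLD_NEXT, ...) lookup binds to the
old pre-NPTL version instead of the current one. Where intercepting such a
symbol is unavoidable, the usual escape hatch is dlvsym, which pins an
explicit version; a sketch, where the "GLIBC_2.3.2" string is the customary
version for the NPTL condvar symbols on x86 Linux but is an assumption here:

    #define _GNU_SOURCE
    #include <dlfcn.h>

    typedef int (*cond_init_t)(void *cond, const void *attr);

    static cond_init_t resolve_cond_init(void) {
      // Unlike dlsym, dlvsym names the exact symbol version to bind.
      return (cond_init_t)dlvsym(RTLD_NEXT, "pthread_cond_init",
                                 "GLIBC_2.3.2");
    }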
@@ -1080,11 +1062,188 @@ TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) {
   return res;
 }
 
+TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
+  SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
+  int fd = REAL(open)(name, flags, mode);
+  if (fd >= 0)
+    FdFileCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
+  SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
+  int fd = REAL(open64)(name, flags, mode);
+  if (fd >= 0)
+    FdFileCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
+  SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
+  int fd = REAL(creat)(name, mode);
+  if (fd >= 0)
+    FdFileCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
+  SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
+  int fd = REAL(creat64)(name, mode);
+  if (fd >= 0)
+    FdFileCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, dup, int oldfd) {
+  SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
+  int newfd = REAL(dup)(oldfd);
+  if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
+    FdDup(thr, pc, oldfd, newfd);
+  return newfd;
+}
+
+TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
+  SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
+  int newfd2 = REAL(dup2)(oldfd, newfd);
+  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+    FdDup(thr, pc, oldfd, newfd2);
+  return newfd2;
+}
+
+TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
+  SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
+  int newfd2 = REAL(dup3)(oldfd, newfd, flags);
+  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+    FdDup(thr, pc, oldfd, newfd2);
+  return newfd2;
+}
+
+TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
+  SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
+  int fd = REAL(eventfd)(initval, flags);
+  if (fd >= 0)
+    FdEventCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
+  SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
+  if (fd >= 0)
+    FdClose(thr, pc, fd);
+  fd = REAL(signalfd)(fd, mask, flags);
+  if (fd >= 0)
+    FdSignalCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, inotify_init, int fake) {
+  SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
+  int fd = REAL(inotify_init)(fake);
+  if (fd >= 0)
+    FdInotifyCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
+  SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
+  int fd = REAL(inotify_init1)(flags);
+  if (fd >= 0)
+    FdInotifyCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
+  SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
+  int fd = REAL(socket)(domain, type, protocol);
+  if (fd >= 0)
+    FdSocketCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
+  SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
+  int res = REAL(socketpair)(domain, type, protocol, fd);
+  if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
+    FdPipeCreate(thr, pc, fd[0], fd[1]);
+  return res;
+}
+
+TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
+  SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
+  FdSocketConnecting(thr, pc, fd);
+  int res = REAL(connect)(fd, addr, addrlen);
+  if (res == 0 && fd >= 0)
+    FdSocketConnect(thr, pc, fd);
+  return res;
+}
+
+TSAN_INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
+  SCOPED_TSAN_INTERCEPTOR(accept, fd, addr, addrlen);
+  int fd2 = REAL(accept)(fd, addr, addrlen);
+  if (fd >= 0 && fd2 >= 0)
+    FdSocketAccept(thr, pc, fd, fd2);
+  return fd2;
+}
+
+TSAN_INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
+  SCOPED_TSAN_INTERCEPTOR(accept4, fd, addr, addrlen, f);
+  int fd2 = REAL(accept4)(fd, addr, addrlen, f);
+  if (fd >= 0 && fd2 >= 0)
+    FdSocketAccept(thr, pc, fd, fd2);
+  return fd2;
+}
+
+TSAN_INTERCEPTOR(int, epoll_create, int size) {
+  SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
+  int fd = REAL(epoll_create)(size);
+  if (fd >= 0)
+    FdPollCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
+  SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
+  int fd = REAL(epoll_create1)(flags);
+  if (fd >= 0)
+    FdPollCreate(thr, pc, fd);
+  return fd;
+}
+
+TSAN_INTERCEPTOR(int, close, int fd) {
+  SCOPED_TSAN_INTERCEPTOR(close, fd);
+  if (fd >= 0)
+    FdClose(thr, pc, fd);
+  return REAL(close)(fd);
+}
+
+TSAN_INTERCEPTOR(int, __close, int fd) {
+  SCOPED_TSAN_INTERCEPTOR(__close, fd);
+  if (fd >= 0)
+    FdClose(thr, pc, fd);
+  return REAL(__close)(fd);
+}
+
+TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
+  SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
+  int res = REAL(pipe)(pipefd);
+  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+  return res;
+}
+
+TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
+  SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
+  int res = REAL(pipe2)(pipefd, flags);
+  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+  return res;
+}
+
 TSAN_INTERCEPTOR(long_t, read, int fd, void *buf, long_t sz) {
   SCOPED_TSAN_INTERCEPTOR(read, fd, buf, sz);
   int res = REAL(read)(fd, buf, sz);
-  if (res >= 0) {
-    Acquire(thr, pc, fd2addr(fd));
+  if (res >= 0 && fd >= 0) {
+    FdAcquire(thr, pc, fd);
   }
   return res;
 }
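
All of the new interceptors follow one shape: enter the runtime through
SCOPED_TSAN_INTERCEPTOR (which supplies thr and pc), tell the fd machinery
what happened, then forward to the real libc function via REAL(). Stripped of
the TSan macros, the underlying mechanism is ordinary symbol interposition; a
generic sketch of the same idea (illustrative only, not the runtime's actual
plumbing):

    #define _GNU_SOURCE
    #include <dlfcn.h>

    typedef int (*close_fn)(int);

    // Our definition shadows libc's; dlsym(RTLD_NEXT) finds the real one.
    int close(int fd) {
      static close_fn real_close;
      if (!real_close)
        real_close = (close_fn)dlsym(RTLD_NEXT, "close");
      /* ...record the event for fd here, as FdClose() does in TSan... */
      return real_close(fd);
    }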
@@ -1092,8 +1251,8 @@ TSAN_INTERCEPTOR(long_t, read, int fd, void *buf, long_t sz) {
 TSAN_INTERCEPTOR(long_t, pread, int fd, void *buf, long_t sz, unsigned off) {
   SCOPED_TSAN_INTERCEPTOR(pread, fd, buf, sz, off);
   int res = REAL(pread)(fd, buf, sz, off);
-  if (res >= 0) {
-    Acquire(thr, pc, fd2addr(fd));
+  if (res >= 0 && fd >= 0) {
+    FdAcquire(thr, pc, fd);
   }
   return res;
 }
@@ -1101,8 +1260,8 @@ TSAN_INTERCEPTOR(long_t, pread, int fd, void *buf, long_t sz, unsigned off) {
 TSAN_INTERCEPTOR(long_t, pread64, int fd, void *buf, long_t sz, u64 off) {
   SCOPED_TSAN_INTERCEPTOR(pread64, fd, buf, sz, off);
   int res = REAL(pread64)(fd, buf, sz, off);
-  if (res >= 0) {
-    Acquire(thr, pc, fd2addr(fd));
+  if (res >= 0 && fd >= 0) {
+    FdAcquire(thr, pc, fd);
   }
   return res;
 }
@@ -1110,8 +1269,8 @@ TSAN_INTERCEPTOR(long_t, pread64, int fd, void *buf, long_t sz, u64 off) {
 TSAN_INTERCEPTOR(long_t, readv, int fd, void *vec, int cnt) {
   SCOPED_TSAN_INTERCEPTOR(readv, fd, vec, cnt);
   int res = REAL(readv)(fd, vec, cnt);
-  if (res >= 0) {
-    Acquire(thr, pc, fd2addr(fd));
+  if (res >= 0 && fd >= 0) {
+    FdAcquire(thr, pc, fd);
   }
   return res;
 }
@@ -1119,57 +1278,64 @@ TSAN_INTERCEPTOR(long_t, readv, int fd, void *vec, int cnt) {
 TSAN_INTERCEPTOR(long_t, preadv64, int fd, void *vec, int cnt, u64 off) {
   SCOPED_TSAN_INTERCEPTOR(preadv64, fd, vec, cnt, off);
   int res = REAL(preadv64)(fd, vec, cnt, off);
-  if (res >= 0) {
-    Acquire(thr, pc, fd2addr(fd));
+  if (res >= 0 && fd >= 0) {
+    FdAcquire(thr, pc, fd);
   }
   return res;
 }
 
 TSAN_INTERCEPTOR(long_t, write, int fd, void *buf, long_t sz) {
   SCOPED_TSAN_INTERCEPTOR(write, fd, buf, sz);
-  Release(thr, pc, fd2addr(fd));
+  if (fd >= 0)
+    FdRelease(thr, pc, fd);
   int res = REAL(write)(fd, buf, sz);
   return res;
 }
 
 TSAN_INTERCEPTOR(long_t, pwrite, int fd, void *buf, long_t sz, unsigned off) {
   SCOPED_TSAN_INTERCEPTOR(pwrite, fd, buf, sz, off);
-  Release(thr, pc, fd2addr(fd));
+  if (fd >= 0)
+    FdRelease(thr, pc, fd);
   int res = REAL(pwrite)(fd, buf, sz, off);
   return res;
 }
 
 TSAN_INTERCEPTOR(long_t, pwrite64, int fd, void *buf, long_t sz, u64 off) {
   SCOPED_TSAN_INTERCEPTOR(pwrite64, fd, buf, sz, off);
-  Release(thr, pc, fd2addr(fd));
+  if (fd >= 0)
+    FdRelease(thr, pc, fd);
   int res = REAL(pwrite64)(fd, buf, sz, off);
   return res;
 }
 
 TSAN_INTERCEPTOR(long_t, writev, int fd, void *vec, int cnt) {
   SCOPED_TSAN_INTERCEPTOR(writev, fd, vec, cnt);
-  Release(thr, pc, fd2addr(fd));
+  if (fd >= 0)
+    FdRelease(thr, pc, fd);
   int res = REAL(writev)(fd, vec, cnt);
   return res;
 }
 
 TSAN_INTERCEPTOR(long_t, pwritev64, int fd, void *vec, int cnt, u64 off) {
   SCOPED_TSAN_INTERCEPTOR(pwritev64, fd, vec, cnt, off);
-  Release(thr, pc, fd2addr(fd));
+  if (fd >= 0)
+    FdRelease(thr, pc, fd);
   int res = REAL(pwritev64)(fd, vec, cnt, off);
   return res;
 }
 
 TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) {
   SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, flags);
-  Release(thr, pc, fd2addr(fd));
+  if (fd >= 0)
+    FdRelease(thr, pc, fd);
   int res = REAL(send)(fd, buf, len, flags);
   return res;
 }
 
 TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) {
   SCOPED_TSAN_INTERCEPTOR(sendmsg, fd, msg, flags);
-  Release(thr, pc, fd2addr(fd));
+  if (fd >= 0)
+    FdRelease(thr, pc, fd);
   int res = REAL(sendmsg)(fd, msg, flags);
   return res;
 }
@@ -1177,8 +1343,8 @@ TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) {
 TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) {
   SCOPED_TSAN_INTERCEPTOR(recv, fd, buf, len, flags);
   int res = REAL(recv)(fd, buf, len, flags);
-  if (res >= 0) {
-    Acquire(thr, pc, fd2addr(fd));
+  if (res >= 0 && fd >= 0) {
+    FdAcquire(thr, pc, fd);
   }
   return res;
 }
@@ -1186,15 +1352,15 @@ TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) {
 TSAN_INTERCEPTOR(long_t, recvmsg, int fd, void *msg, int flags) {
   SCOPED_TSAN_INTERCEPTOR(recvmsg, fd, msg, flags);
   int res = REAL(recvmsg)(fd, msg, flags);
-  if (res >= 0) {
-    Acquire(thr, pc, fd2addr(fd));
+  if (res >= 0 && fd >= 0) {
+    FdAcquire(thr, pc, fd);
   }
   return res;
 }
 
 TSAN_INTERCEPTOR(int, unlink, char *path) {
   SCOPED_TSAN_INTERCEPTOR(unlink, path);
-  Release(thr, pc, file2addr(path));
+  Release(thr, pc, File2addr(path));
   int res = REAL(unlink)(path);
   return res;
 }
@@ -1202,19 +1368,57 @@ TSAN_INTERCEPTOR(int, unlink, char *path) {
 TSAN_INTERCEPTOR(void*, fopen, char *path, char *mode) {
   SCOPED_TSAN_INTERCEPTOR(fopen, path, mode);
   void *res = REAL(fopen)(path, mode);
-  Acquire(thr, pc, file2addr(path));
+  Acquire(thr, pc, File2addr(path));
+  if (res) {
+    int fd = fileno_unlocked(res);
+    if (fd >= 0)
+      FdFileCreate(thr, pc, fd);
+  }
   return res;
 }
 
+TSAN_INTERCEPTOR(void*, freopen, char *path, char *mode, void *stream) {
+  SCOPED_TSAN_INTERCEPTOR(freopen, path, mode, stream);
+  if (stream) {
+    int fd = fileno_unlocked(stream);
+    if (fd >= 0)
+      FdClose(thr, pc, fd);
+  }
+  void *res = REAL(freopen)(path, mode, stream);
+  Acquire(thr, pc, File2addr(path));
+  if (res) {
+    int fd = fileno_unlocked(res);
+    if (fd >= 0)
+      FdFileCreate(thr, pc, fd);
+  }
+  return res;
+}
+
+TSAN_INTERCEPTOR(int, fclose, void *stream) {
+  {
+    SCOPED_TSAN_INTERCEPTOR(fclose, stream);
+    if (stream) {
+      int fd = fileno_unlocked(stream);
+      if (fd >= 0)
+        FdClose(thr, pc, fd);
+    }
+  }
+  return REAL(fclose)(stream);
+}
+
 TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
-  SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f);
-  MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true);
+  {
+    SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f);
+    MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true);
+  }
   return REAL(fread)(ptr, size, nmemb, f);
 }
 
 TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
-  SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f);
-  MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false);
+  {
+    SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f);
+    MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false);
+  }
   return REAL(fwrite)(p, size, nmemb, f);
 }
@ -1226,7 +1430,7 @@ TSAN_INTERCEPTOR(int, puts, const char *s) {
|
||||||
|
|
||||||
TSAN_INTERCEPTOR(int, rmdir, char *path) {
|
TSAN_INTERCEPTOR(int, rmdir, char *path) {
|
||||||
SCOPED_TSAN_INTERCEPTOR(rmdir, path);
|
SCOPED_TSAN_INTERCEPTOR(rmdir, path);
|
||||||
Release(thr, pc, dir2addr(path));
|
Release(thr, pc, Dir2addr(path));
|
||||||
int res = REAL(rmdir)(path);
|
int res = REAL(rmdir)(path);
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
@ -1234,14 +1438,15 @@ TSAN_INTERCEPTOR(int, rmdir, char *path) {
|
||||||
TSAN_INTERCEPTOR(void*, opendir, char *path) {
|
TSAN_INTERCEPTOR(void*, opendir, char *path) {
|
||||||
SCOPED_TSAN_INTERCEPTOR(opendir, path);
|
SCOPED_TSAN_INTERCEPTOR(opendir, path);
|
||||||
void *res = REAL(opendir)(path);
|
void *res = REAL(opendir)(path);
|
||||||
Acquire(thr, pc, dir2addr(path));
|
if (res != 0)
|
||||||
|
Acquire(thr, pc, Dir2addr(path));
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
|
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
|
||||||
SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
|
SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
|
||||||
if (op == EPOLL_CTL_ADD) {
|
if (op == EPOLL_CTL_ADD && epfd >= 0) {
|
||||||
Release(thr, pc, epollfd2addr(epfd));
|
FdRelease(thr, pc, epfd);
|
||||||
}
|
}
|
||||||
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
|
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
|
||||||
return res;
|
return res;
|
||||||
|
|
@ -1250,8 +1455,8 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
|
||||||
TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
|
TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
|
||||||
SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
|
SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
|
||||||
int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
|
int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
|
||||||
if (res > 0) {
|
if (res > 0 && epfd >= 0) {
|
||||||
Acquire(thr, pc, epollfd2addr(epfd));
|
FdAcquire(thr, pc, epfd);
|
||||||
}
|
}
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
@ -1423,6 +1628,19 @@ TSAN_INTERCEPTOR(int, munlockall, void) {
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
TSAN_INTERCEPTOR(int, fork, int fake) {
|
||||||
|
SCOPED_TSAN_INTERCEPTOR(fork, fake);
|
||||||
|
// It's intercepted merely to process pending signals.
|
||||||
|
int pid = REAL(fork)(fake);
|
||||||
|
if (pid == 0) {
|
||||||
|
// child
|
||||||
|
FdOnFork(thr, pc);
|
||||||
|
} else if (pid > 0) {
|
||||||
|
// parent
|
||||||
|
}
|
||||||
|
return pid;
|
||||||
|
}
|
||||||
|
|
||||||
namespace __tsan {
|
namespace __tsan {
|
||||||
|
|
||||||
void ProcessPendingSignals(ThreadState *thr) {
|
void ProcessPendingSignals(ThreadState *thr) {
|
||||||
|
|
@ -1545,7 +1763,7 @@ void InitializeInterceptors() {
|
||||||
TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
|
TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
|
||||||
TSAN_INTERCEPT(pthread_rwlock_unlock);
|
TSAN_INTERCEPT(pthread_rwlock_unlock);
|
||||||
|
|
||||||
TSAN_INTERCEPT(pthread_cond_init);
|
// TSAN_INTERCEPT(pthread_cond_init);
|
||||||
TSAN_INTERCEPT(pthread_cond_destroy);
|
TSAN_INTERCEPT(pthread_cond_destroy);
|
||||||
TSAN_INTERCEPT(pthread_cond_signal);
|
TSAN_INTERCEPT(pthread_cond_signal);
|
||||||
TSAN_INTERCEPT(pthread_cond_broadcast);
|
TSAN_INTERCEPT(pthread_cond_broadcast);
|
||||||
|
|
@ -1566,6 +1784,28 @@ void InitializeInterceptors() {
|
||||||
TSAN_INTERCEPT(sem_post);
|
TSAN_INTERCEPT(sem_post);
|
||||||
TSAN_INTERCEPT(sem_getvalue);
|
TSAN_INTERCEPT(sem_getvalue);
|
||||||
|
|
||||||
|
TSAN_INTERCEPT(open);
|
||||||
|
TSAN_INTERCEPT(open64);
|
||||||
|
TSAN_INTERCEPT(creat);
|
||||||
|
TSAN_INTERCEPT(creat64);
|
||||||
|
TSAN_INTERCEPT(dup);
|
||||||
|
TSAN_INTERCEPT(dup2);
|
||||||
|
TSAN_INTERCEPT(dup3);
|
||||||
|
TSAN_INTERCEPT(eventfd);
|
||||||
|
TSAN_INTERCEPT(signalfd);
|
||||||
|
TSAN_INTERCEPT(inotify_init);
|
||||||
|
TSAN_INTERCEPT(inotify_init1);
|
||||||
|
TSAN_INTERCEPT(socket);
|
||||||
|
TSAN_INTERCEPT(socketpair);
|
||||||
|
TSAN_INTERCEPT(connect);
|
||||||
|
TSAN_INTERCEPT(accept);
|
||||||
|
TSAN_INTERCEPT(accept4);
|
||||||
|
TSAN_INTERCEPT(epoll_create);
|
||||||
|
TSAN_INTERCEPT(epoll_create1);
|
||||||
|
TSAN_INTERCEPT(close);
|
||||||
|
TSAN_INTERCEPT(pipe);
|
||||||
|
TSAN_INTERCEPT(pipe2);
|
||||||
|
|
||||||
TSAN_INTERCEPT(read);
|
TSAN_INTERCEPT(read);
|
||||||
TSAN_INTERCEPT(pread);
|
TSAN_INTERCEPT(pread);
|
||||||
TSAN_INTERCEPT(pread64);
|
TSAN_INTERCEPT(pread64);
|
||||||
|
|
@ -1583,6 +1823,8 @@ void InitializeInterceptors() {
|
||||||
|
|
||||||
TSAN_INTERCEPT(unlink);
|
TSAN_INTERCEPT(unlink);
|
||||||
TSAN_INTERCEPT(fopen);
|
TSAN_INTERCEPT(fopen);
|
||||||
|
TSAN_INTERCEPT(freopen);
|
||||||
|
TSAN_INTERCEPT(fclose);
|
||||||
TSAN_INTERCEPT(fread);
|
TSAN_INTERCEPT(fread);
|
||||||
TSAN_INTERCEPT(fwrite);
|
TSAN_INTERCEPT(fwrite);
|
||||||
TSAN_INTERCEPT(puts);
|
TSAN_INTERCEPT(puts);
|
||||||
|
|
@ -1608,6 +1850,8 @@ void InitializeInterceptors() {
|
||||||
TSAN_INTERCEPT(mlockall);
|
TSAN_INTERCEPT(mlockall);
|
||||||
TSAN_INTERCEPT(munlockall);
|
TSAN_INTERCEPT(munlockall);
|
||||||
|
|
||||||
|
TSAN_INTERCEPT(fork);
|
||||||
|
|
||||||
// Need to setup it, because interceptors check that the function is resolved.
|
// Need to setup it, because interceptors check that the function is resolved.
|
||||||
// But atexit is emitted directly into the module, so can't be resolved.
|
// But atexit is emitted directly into the module, so can't be resolved.
|
||||||
REAL(atexit) = (int(*)(void(*)()))unreachable;
|
REAL(atexit) = (int(*)(void(*)()))unreachable;
|
||||||
|
|
@ -1623,6 +1867,8 @@ void InitializeInterceptors() {
|
||||||
Printf("ThreadSanitizer: failed to create thread key\n");
|
Printf("ThreadSanitizer: failed to create thread key\n");
|
||||||
Die();
|
Die();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
FdInit();
|
||||||
}
|
}
|
||||||
|
|
||||||
void internal_start_thread(void(*func)(void *arg), void *arg) {
|
void internal_start_thread(void(*func)(void *arg), void *arg) {
|
||||||
|
|
|
||||||
|
|
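The descriptor interceptors above replace the old per-fd proxy addresses (fd2addr/epollfd2addr) with explicit FdRelease/FdAcquire calls, so a write-like operation on a descriptor happens-before a later successful read-like operation on the same descriptor. A minimal sketch of the hand-off this models, assuming the pipe/read/write interceptors registered above follow the same pattern as the send/recv bodies shown here; the program itself is illustrative, not part of the patch:

// handoff.cc (hypothetical) -- build: g++ -fsanitize=thread -pthread handoff.cc
// Ownership of 'data' is passed through a pipe: the producer's write() acts
// as a release on the fd, the consumer's successful read() as an acquire,
// so TSan sees the transfer as synchronized rather than a false race.
#include <pthread.h>
#include <unistd.h>

static int pipefd[2];
static int data;  // written by producer, read by main after the hand-off

static void *producer(void *arg) {
  data = 42;                       // plain write before publishing
  char c = 0;
  (void)write(pipefd[1], &c, 1);   // release point on the pipe descriptor
  return 0;
}

int main() {
  if (pipe(pipefd))
    return 1;
  pthread_t t;
  pthread_create(&t, 0, producer, 0);
  char c;
  (void)read(pipefd[0], &c, 1);    // acquire point: ordered after write()
  int v = data;                    // no race reported
  pthread_join(t, 0);
  return v == 42 ? 0 : 1;
}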
@@ -229,7 +229,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
     return *a;
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, false);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   thr->clock.acquire(&s->clock);
   T v = *a;

@@ -251,7 +251,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
     return;
   }
   __sync_synchronize();
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   thr->clock.ReleaseStore(&s->clock);
   *a = v;

@@ -263,7 +263,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,

 template<typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))
     thr->clock.acq_rel(&s->clock);

@@ -322,7 +322,7 @@ template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc,
     volatile T *a, T *c, T v, morder mo, morder fmo) {
   (void)fmo;  // Unused because llvm does not pass it yet.
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   if (IsAcqRelOrder(mo))
     thr->clock.acq_rel(&s->clock);
@@ -26,7 +26,7 @@ typedef long  __tsan_atomic64;  // NOLINT

 #if defined(__SIZEOF_INT128__) \
     || (__clang_major__ * 100 + __clang_minor__ >= 302)
-typedef __int128 __tsan_atomic128;
+__extension__ typedef __int128 __tsan_atomic128;
 #define __TSAN_HAS_INT128 1
 #else
 typedef char     __tsan_atomic128;
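The added __extension__ keeps the non-standard __int128 typedef from triggering diagnostics when user code includes this public header under -pedantic. A tiny check of that GCC/Clang behavior (the file name is made up):

// ext_check.cc (hypothetical) -- compile with: g++ -pedantic -c ext_check.cc
// Without __extension__, -pedantic diagnoses the non-standard __int128 type;
// with it, the typedef is accepted silently, and later uses of the typedef
// name do not re-trigger the warning.
__extension__ typedef __int128 my_i128;

my_i128 add(my_i128 a, my_i128 b) { return a + b; }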
@@ -0,0 +1,303 @@
+//===-- tsan_interface_java.cc --------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface_java.h"
+#include "tsan_rtl.h"
+#include "tsan_mutex.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+using namespace __tsan;  // NOLINT
+
+namespace __tsan {
+
+const uptr kHeapShadow = 0x300000000000ull;
+const uptr kHeapAlignment = 8;
+
+struct BlockDesc {
+  bool begin;
+  Mutex mtx;
+  SyncVar *head;
+
+  BlockDesc()
+      : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
+      , head() {
+    CHECK_EQ(begin, false);
+    begin = true;
+  }
+
+  ~BlockDesc() {
+    CHECK_EQ(begin, true);
+    begin = false;
+    ThreadState *thr = cur_thread();
+    SyncVar *s = head;
+    while (s) {
+      SyncVar *s1 = s->next;
+      StatInc(thr, StatSyncDestroyed);
+      s->mtx.Lock();
+      s->mtx.Unlock();
+      thr->mset.Remove(s->GetId());
+      DestroyAndFree(s);
+      s = s1;
+    }
+  }
+};
+
+struct JavaContext {
+  const uptr heap_begin;
+  const uptr heap_size;
+  BlockDesc *heap_shadow;
+
+  JavaContext(jptr heap_begin, jptr heap_size)
+      : heap_begin(heap_begin)
+      , heap_size(heap_size) {
+    uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
+    heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
+    if ((uptr)heap_shadow != kHeapShadow) {
+      Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
+      Die();
+    }
+  }
+};
+
+class ScopedJavaFunc {
+ public:
+  ScopedJavaFunc(ThreadState *thr, uptr pc)
+      : thr_(thr) {
+    Initialize(thr_);
+    FuncEntry(thr, pc);
+    CHECK_EQ(thr_->in_rtl, 0);
+    thr_->in_rtl++;
+  }
+
+  ~ScopedJavaFunc() {
+    thr_->in_rtl--;
+    CHECK_EQ(thr_->in_rtl, 0);
+    FuncExit(thr_);
+    // FIXME(dvyukov): process pending signals.
+  }
+
+ private:
+  ThreadState *thr_;
+};
+
+static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
+static JavaContext *jctx;
+
+static BlockDesc *getblock(uptr addr) {
+  uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
+  return &jctx->heap_shadow[i];
+}
+
+static uptr USED getmem(BlockDesc *b) {
+  uptr i = b - jctx->heap_shadow;
+  uptr p = jctx->heap_begin + i * kHeapAlignment;
+  CHECK_GE(p, jctx->heap_begin);
+  CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
+  return p;
+}
+
+static BlockDesc *getblockbegin(uptr addr) {
+  for (BlockDesc *b = getblock(addr);; b--) {
+    CHECK_GE(b, jctx->heap_shadow);
+    if (b->begin)
+      return b;
+  }
+  return 0;
+}
+
+SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
+                     bool write_lock, bool create) {
+  if (jctx == 0 || addr < jctx->heap_begin
+      || addr >= jctx->heap_begin + jctx->heap_size)
+    return 0;
+  BlockDesc *b = getblockbegin(addr);
+  DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
+  Lock l(&b->mtx);
+  SyncVar *s = b->head;
+  for (; s; s = s->next) {
+    if (s->addr == addr) {
+      DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
+      break;
+    }
+  }
+  if (s == 0 && create) {
+    DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
+    s = CTX()->synctab.Create(thr, pc, addr);
+    s->next = b->head;
+    b->head = s;
+  }
+  if (s) {
+    if (write_lock)
+      s->mtx.Lock();
+    else
+      s->mtx.ReadLock();
+  }
+  return s;
+}
+
+SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
+  // We do not destroy Java mutexes other than in __tsan_java_free().
+  return 0;
+}
+
+}  // namespace __tsan {
+
+#define SCOPED_JAVA_FUNC(func) \
+  ThreadState *thr = cur_thread(); \
+  const uptr caller_pc = GET_CALLER_PC(); \
+  const uptr pc = (uptr)&func; \
+  (void)pc; \
+  ScopedJavaFunc scoped(thr, caller_pc); \
+/**/
+
+void __tsan_java_init(jptr heap_begin, jptr heap_size) {
+  SCOPED_JAVA_FUNC(__tsan_java_init);
+  DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
+  CHECK_EQ(jctx, 0);
+  CHECK_GT(heap_begin, 0);
+  CHECK_GT(heap_size, 0);
+  CHECK_EQ(heap_begin % kHeapAlignment, 0);
+  CHECK_EQ(heap_size % kHeapAlignment, 0);
+  CHECK_LT(heap_begin, heap_begin + heap_size);
+  jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
+}
+
+int __tsan_java_fini() {
+  SCOPED_JAVA_FUNC(__tsan_java_fini);
+  DPrintf("#%d: java_fini()\n", thr->tid);
+  CHECK_NE(jctx, 0);
+  // FIXME(dvyukov): this does not call atexit() callbacks.
+  int status = Finalize(thr);
+  DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
+  return status;
+}
+
+void __tsan_java_alloc(jptr ptr, jptr size) {
+  SCOPED_JAVA_FUNC(__tsan_java_alloc);
+  DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
+  CHECK_NE(jctx, 0);
+  CHECK_NE(size, 0);
+  CHECK_EQ(ptr % kHeapAlignment, 0);
+  CHECK_EQ(size % kHeapAlignment, 0);
+  CHECK_GE(ptr, jctx->heap_begin);
+  CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+  BlockDesc *b = getblock(ptr);
+  new(b) BlockDesc();
+}
+
+void __tsan_java_free(jptr ptr, jptr size) {
+  SCOPED_JAVA_FUNC(__tsan_java_free);
+  DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
+  CHECK_NE(jctx, 0);
+  CHECK_NE(size, 0);
+  CHECK_EQ(ptr % kHeapAlignment, 0);
+  CHECK_EQ(size % kHeapAlignment, 0);
+  CHECK_GE(ptr, jctx->heap_begin);
+  CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+  BlockDesc *beg = getblock(ptr);
+  BlockDesc *end = getblock(ptr + size);
+  for (BlockDesc *b = beg; b != end; b++) {
+    if (b->begin)
+      b->~BlockDesc();
+  }
+}
+
+void __tsan_java_move(jptr src, jptr dst, jptr size) {
+  SCOPED_JAVA_FUNC(__tsan_java_move);
+  DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
+  CHECK_NE(jctx, 0);
+  CHECK_NE(size, 0);
+  CHECK_EQ(src % kHeapAlignment, 0);
+  CHECK_EQ(dst % kHeapAlignment, 0);
+  CHECK_EQ(size % kHeapAlignment, 0);
+  CHECK_GE(src, jctx->heap_begin);
+  CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
+  CHECK_GE(dst, jctx->heap_begin);
+  CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
+  CHECK(dst >= src + size || src >= dst + size);
+
+  // Assuming it's not running concurrently with threads that do
+  // memory accesses and mutex operations (stop-the-world phase).
+  {  // NOLINT
+    BlockDesc *s = getblock(src);
+    BlockDesc *d = getblock(dst);
+    BlockDesc *send = getblock(src + size);
+    for (; s != send; s++, d++) {
+      CHECK_EQ(d->begin, false);
+      if (s->begin) {
+        DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
+        new(d) BlockDesc;
+        d->head = s->head;
+        for (SyncVar *sync = d->head; sync; sync = sync->next) {
+          uptr newaddr = sync->addr - src + dst;
+          DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
+          sync->addr = newaddr;
+        }
+        s->head = 0;
+        s->~BlockDesc();
+      }
+    }
+  }
+
+  {  // NOLINT
+    u64 *s = (u64*)MemToShadow(src);
+    u64 *d = (u64*)MemToShadow(dst);
+    u64 *send = (u64*)MemToShadow(src + size);
+    for (; s != send; s++, d++) {
+      *d = *s;
+      *s = 0;
+    }
+  }
+}
+
+void __tsan_java_mutex_lock(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
+  DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexLock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_unlock(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
+  DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexUnlock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_read_lock(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
+  DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexReadLock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_read_unlock(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
+  DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
+  CHECK_NE(jctx, 0);
+  CHECK_GE(addr, jctx->heap_begin);
+  CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+  MutexReadUnlock(thr, pc, addr);
+}
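The runtime above shadows every 8-byte slot of the managed heap with one BlockDesc and sets begin only on the first slot of each object, so getblockbegin() can map any interior address back to its object by scanning backwards. A standalone sketch of the index arithmetic, with a hypothetical heap base:

#include <cstdint>
#include <cassert>

const uint64_t kHeapAlignment = 8;

// Mirrors getblock(): which BlockDesc slot describes 'addr'?
uint64_t block_index(uint64_t heap_begin, uint64_t addr) {
  return (addr - heap_begin) / kHeapAlignment;
}

int main() {
  uint64_t heap_begin = 0x20000000;  // hypothetical managed-heap base
  // An object at heap_begin+16 of size 24 covers slots 2..4; only slot 2
  // has begin==true, so interior addresses scan back to it.
  assert(block_index(heap_begin, heap_begin + 16) == 2);
  assert(block_index(heap_begin, heap_begin + 32) == 4);  // interior word
  return 0;
}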
@@ -0,0 +1,72 @@
+//===-- tsan_interface_java.h -----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for verification of Java or mixed Java/C++ programs.
+// The interface is intended to be used from within a JVM and notify TSan
+// about such events like Java locks and GC memory compaction.
+//
+// For plain memory accesses and function entry/exit a JVM is intended to use
+// C++ interfaces: __tsan_readN/writeN and __tsan_func_enter/exit.
+//
+// For volatile memory accesses and atomic operations JVM is intended to use
+// standard atomics API: __tsan_atomicN_load/store/etc.
+//
+// For usage examples see lit_tests/java_*.cc
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_JAVA_H
+#define TSAN_INTERFACE_JAVA_H
+
+#ifndef INTERFACE_ATTRIBUTE
+# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned long jptr;  // NOLINT
+
+// Must be called before any other callback from Java.
+void __tsan_java_init(jptr heap_begin, jptr heap_size) INTERFACE_ATTRIBUTE;
+// Must be called when the application exits.
+// Not necessary the last callback (concurrently running threads are OK).
+// Returns exit status or 0 if tsan does not want to override it.
+int __tsan_java_fini() INTERFACE_ATTRIBUTE;
+
+// Callback for memory allocations.
+// May be omitted for allocations that are not subject to data races
+// nor contain synchronization objects (e.g. String).
+void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory free.
+// Can be aggregated for several objects (preferably).
+void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory move by GC.
+// Can be aggregated for several objects (preferably).
+// The ranges must not overlap.
+void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+
+// Mutex lock.
+// Addr is any unique address associated with the mutex.
+// Must not be called on recursive reentry.
+// Object.wait() is handled as a pair of unlock/lock.
+void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex unlock.
+void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read lock.
+void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read unlock.
+void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#undef INTERFACE_ATTRIBUTE
+
+#endif  // #ifndef TSAN_INTERFACE_JAVA_H
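The header above is the whole JVM-facing contract. A hedged sketch of how an embedding VM might drive it; the hook names, heap layout, and values are hypothetical, and error handling is omitted:

#include "tsan_interface_java.h"   // declares the __tsan_java_* callbacks

void jvm_startup() {
  // Both values must be 8-byte aligned, per the CHECKs in __tsan_java_init.
  jptr heap_begin = 0x20000000;    // hypothetical managed-heap base
  jptr heap_size  = 64 << 20;      // hypothetical 64MB heap
  __tsan_java_init(heap_begin, heap_size);
}

void on_object_alloc(jptr obj, jptr size) {
  __tsan_java_alloc(obj, size);    // size already rounded to 8 by the VM
}

void on_monitor_enter(jptr obj) { __tsan_java_mutex_lock(obj); }
void on_monitor_exit(jptr obj)  { __tsan_java_mutex_unlock(obj); }

// During a stop-the-world compaction the collector reports object moves:
void on_gc_move(jptr from, jptr to, jptr size) {
  __tsan_java_move(from, to, size);  // ranges must not overlap
}

int jvm_shutdown() {
  return __tsan_java_fini();       // may override the process exit status
}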
@@ -58,8 +58,9 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
   void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
   if (p == 0)
     return 0;
-  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
+  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
   b->size = sz;
+  b->head = 0;
   b->alloc_tid = thr->unique_id;
   b->alloc_stack_id = CurrentStackId(thr, pc);
   if (CTX() && CTX()->initialized) {

@@ -90,6 +91,7 @@ void user_free(ThreadState *thr, uptr pc, void *p) {
   if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
     MemoryRangeFreed(thr, pc, (uptr)p, b->size);
   }
+  b->~MBlock();
   allocator()->Deallocate(&thr->alloc_cache, p);
   SignalUnsafeCall(thr, pc);
 }

@@ -115,9 +117,11 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
 }

 MBlock *user_mblock(ThreadState *thr, void *p) {
-  // CHECK_GT(thr->in_rtl, 0);
   CHECK_NE(p, (void*)0);
-  return (MBlock*)allocator()->GetMetaData(p);
+  Allocator *a = allocator();
+  void *b = a->GetBlockBegin(p);
+  CHECK_NE(b, 0);
+  return (MBlock*)a->GetMetaData(b);
 }

 void invoke_malloc_hook(void *ptr, uptr size) {
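user_alloc now constructs the MBlock inside the allocator's metadata space with placement new (so the block's new Mutex member actually gets a constructor run), and user_free balances it with an explicit destructor call before deallocation. The idiom in isolation, with a stand-in type:

#include <new>       // placement new
#include <cstdlib>

struct Meta {
  Meta() : refs(1) {}   // nontrivial ctor: must actually run
  ~Meta() {}
  int refs;
};

int main() {
  void *raw = std::malloc(sizeof(Meta));  // stands in for GetMetaData(p)
  Meta *m = new (raw) Meta;               // construct in place, no allocation
  m->~Meta();                             // destroy in place (cf. b->~MBlock())
  std::free(raw);
  return 0;
}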
@@ -57,6 +57,7 @@ enum MBlockType {
   MBlockSuppression,
   MBlockExpectRace,
   MBlockSignal,
+  MBlockFD,

   // This must be the last.
   MBlockTypeCount
@@ -23,22 +23,28 @@ namespace __tsan {
 // then Report mutex can be locked while under Threads mutex.
 // The leaf mutexes can be locked under any other mutexes.
 // Recursive locking is not supported.
+#if TSAN_DEBUG && !TSAN_GO
 const MutexType MutexTypeLeaf = (MutexType)-1;
 static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
   /*0 MutexTypeInvalid*/     {},
   /*1 MutexTypeTrace*/       {MutexTypeLeaf},
   /*2 MutexTypeThreads*/     {MutexTypeReport},
-  /*3 MutexTypeReport*/      {},
+  /*3 MutexTypeReport*/      {MutexTypeSyncTab, MutexTypeMBlock,
+                              MutexTypeJavaMBlock},
   /*4 MutexTypeSyncVar*/     {},
   /*5 MutexTypeSyncTab*/     {MutexTypeSyncVar},
   /*6 MutexTypeSlab*/        {MutexTypeLeaf},
   /*7 MutexTypeAnnotations*/ {},
   /*8 MutexTypeAtExit*/      {MutexTypeSyncTab},
+  /*9 MutexTypeMBlock*/      {MutexTypeSyncVar},
+  /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
 };

 static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
+#endif

 void InitializeMutex() {
+#if TSAN_DEBUG && !TSAN_GO
   // Build the "can lock" adjacency matrix.
   // If [i][j]==true, then one can lock mutex j while under mutex i.
   const int N = MutexTypeCount;

@@ -112,14 +118,18 @@ void InitializeMutex() {
       Die();
     }
   }
+#endif
 }

 DeadlockDetector::DeadlockDetector() {
   // Rely on zero initialization because some mutexes can be locked before ctor.
 }

+#if TSAN_DEBUG && !TSAN_GO
 void DeadlockDetector::Lock(MutexType t) {
   // Printf("LOCK %d @%zu\n", t, seq_ + 1);
+  CHECK_GT(t, MutexTypeInvalid);
+  CHECK_LT(t, MutexTypeCount);
   u64 max_seq = 0;
   u64 max_idx = MutexTypeInvalid;
   for (int i = 0; i != MutexTypeCount; i++) {

@@ -148,6 +158,7 @@ void DeadlockDetector::Unlock(MutexType t) {
   CHECK(locked_[t]);
   locked_[t] = 0;
 }
+#endif

 const uptr kUnlocked = 0;
 const uptr kWriteLock = 1;
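CanLockTab records only the direct may-acquire edges between mutex types; InitializeMutex expands it into the reachability matrix CanLockAdj that the debug-build DeadlockDetector consults for any pair directly. A minimal sketch of that expansion (a plain boolean transitive closure; the MutexTypeLeaf special case handled by the real code is omitted):

#include <cstring>

const int N = 11;                 // MutexTypeCount in this revision
bool can_lock[N][N];              // direct edges, from CanLockTab
bool can_lock_adj[N][N];          // transitive closure, cf. CanLockAdj

void BuildClosure() {
  std::memcpy(can_lock_adj, can_lock, sizeof(can_lock));
  for (int k = 0; k < N; k++)
    for (int i = 0; i < N; i++)
      for (int j = 0; j < N; j++)
        if (can_lock_adj[i][k] && can_lock_adj[k][j])
          can_lock_adj[i][j] = true;
  // A self-edge (can_lock_adj[i][i]) would mean the static lock order
  // contains a cycle, which the real code reports and dies on.
}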
@@ -27,6 +27,8 @@ enum MutexType {
   MutexTypeSlab,
   MutexTypeAnnotations,
   MutexTypeAtExit,
+  MutexTypeMBlock,
+  MutexTypeJavaMBlock,

   // This must be the last.
   MutexTypeCount
@@ -0,0 +1,87 @@
+//===-- tsan_mutexset.cc --------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_mutexset.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+const uptr MutexSet::kMaxSize;
+
+MutexSet::MutexSet() {
+  size_ = 0;
+  internal_memset(&descs_, 0, sizeof(descs_));
+}
+
+void MutexSet::Add(u64 id, bool write, u64 epoch) {
+  // Look up existing mutex with the same id.
+  for (uptr i = 0; i < size_; i++) {
+    if (descs_[i].id == id) {
+      descs_[i].count++;
+      descs_[i].epoch = epoch;
+      return;
+    }
+  }
+  // On overflow, find the oldest mutex and drop it.
+  if (size_ == kMaxSize) {
+    u64 minepoch = (u64)-1;
+    u64 mini = (u64)-1;
+    for (uptr i = 0; i < size_; i++) {
+      if (descs_[i].epoch < minepoch) {
+        minepoch = descs_[i].epoch;
+        mini = i;
+      }
+    }
+    RemovePos(mini);
+    CHECK_EQ(size_, kMaxSize - 1);
+  }
+  // Add new mutex descriptor.
+  descs_[size_].id = id;
+  descs_[size_].write = write;
+  descs_[size_].epoch = epoch;
+  descs_[size_].count = 1;
+  size_++;
+}
+
+void MutexSet::Del(u64 id, bool write) {
+  for (uptr i = 0; i < size_; i++) {
+    if (descs_[i].id == id) {
+      if (--descs_[i].count == 0)
+        RemovePos(i);
+      return;
+    }
+  }
+}
+
+void MutexSet::Remove(u64 id) {
+  for (uptr i = 0; i < size_; i++) {
+    if (descs_[i].id == id) {
+      RemovePos(i);
+      return;
+    }
+  }
+}
+
+void MutexSet::RemovePos(uptr i) {
+  CHECK_LT(i, size_);
+  descs_[i] = descs_[size_ - 1];
+  size_--;
+}
+
+uptr MutexSet::Size() const {
+  return size_;
+}
+
+MutexSet::Desc MutexSet::Get(uptr i) const {
+  CHECK_LT(i, size_);
+  return descs_[i];
+}
+
+}  // namespace __tsan
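MutexSet is a small fixed-capacity multiset: Add() on an already-present id only bumps its count, and on overflow the entry with the oldest epoch is evicted. A toy driver against the interface declared in the header that follows (ids and epochs are arbitrary values):

#include "tsan_mutexset.h"
using namespace __tsan;

void example() {
  MutexSet ms;
  ms.Add(/*id=*/7, /*write=*/true, /*epoch=*/100);  // first lock of M7
  ms.Add(7, true, 105);   // recursive relock: count becomes 2, Size() stays 1
  ms.Del(7, true);        // one unlock: count back to 1, still in the set
  ms.Del(7, true);        // final unlock: removed from the set
  ms.Remove(7);           // no-op here; used when a mutex is destroyed
}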
@@ -0,0 +1,63 @@
+//===-- tsan_mutexset.h -----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// MutexSet holds the set of mutexes currently held by a thread.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEXSET_H
+#define TSAN_MUTEXSET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class MutexSet {
+ public:
+  // Holds limited number of mutexes.
+  // The oldest mutexes are discarded on overflow.
+  static const uptr kMaxSize = 64;
+  struct Desc {
+    u64 id;
+    u64 epoch;
+    int count;
+    bool write;
+  };
+
+  MutexSet();
+  // The 'id' is obtained from SyncVar::GetId().
+  void Add(u64 id, bool write, u64 epoch);
+  void Del(u64 id, bool write);
+  void Remove(u64 id);  // Removes the mutex completely (if it's destroyed).
+  uptr Size() const;
+  Desc Get(uptr i) const;
+
+ private:
+#ifndef TSAN_GO
+  uptr size_;
+  Desc descs_[kMaxSize];
+#endif
+
+  void RemovePos(uptr i);
+};
+
+// Go does not have mutexes, so do not spend memory and time.
+// (Go sync.Mutex is actually a semaphore -- can be unlocked
+// in different goroutine).
+#ifdef TSAN_GO
+MutexSet::MutexSet() {}
+void MutexSet::Add(u64 id, bool write, u64 epoch) {}
+void MutexSet::Del(u64 id, bool write) {}
+void MutexSet::Remove(u64 id) {}
+void MutexSet::RemovePos(uptr i) {}
+uptr MutexSet::Size() const { return 0; }
+MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
+#endif
+
+}  // namespace __tsan
+
+#endif  // TSAN_MUTEXSET_H
@@ -135,7 +135,6 @@ void FlushShadowMemory();

 const char *InitializePlatform();
 void FinalizePlatform();
-void MapThreadTrace(uptr addr, uptr size);

 uptr ALWAYS_INLINE INLINE GetThreadTrace(int tid) {
   uptr p = kTraceMemBegin + (uptr)tid * kTraceSize * sizeof(Event);
   DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
@@ -69,9 +69,7 @@ uptr GetShadowMemoryConsumption() {
 }

 void FlushShadowMemory() {
-  madvise((void*)kLinuxShadowBeg,
-          kLinuxShadowEnd - kLinuxShadowBeg,
-          MADV_DONTNEED);
+  FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
 }

 #ifndef TSAN_GO

@@ -118,16 +116,6 @@ void InitializeShadowMemory() {
 }
 #endif

-void MapThreadTrace(uptr addr, uptr size) {
-  DPrintf("Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
-  CHECK_GE(addr, kTraceMemBegin);
-  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
-  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
-    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
-    Die();
-  }
-}
-
 static uptr g_data_start;
 static uptr g_data_end;

@@ -180,18 +168,14 @@ static uptr g_tls_size;
 #else
 # define INTERNAL_FUNCTION
 #endif
-extern "C" void _dl_get_tls_static_info(size_t*, size_t*)
-    __attribute__((weak)) INTERNAL_FUNCTION;

 static int InitTlsSize() {
   typedef void (*get_tls_func)(size_t*, size_t*) INTERNAL_FUNCTION;
-  get_tls_func get_tls = &_dl_get_tls_static_info;
-  if (get_tls == 0) {
-    void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
-    CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
-    internal_memcpy(&get_tls, &get_tls_static_info_ptr,
-                    sizeof(get_tls_static_info_ptr));
-  }
+  get_tls_func get_tls;
+  void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
+  CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
+  internal_memcpy(&get_tls, &get_tls_static_info_ptr,
+                  sizeof(get_tls_static_info_ptr));
   CHECK_NE(get_tls, 0);
   size_t tls_size = 0;
   size_t tls_align = 0;

@@ -220,29 +204,35 @@ const char *InitializePlatform() {
     // Disable core dumps, dumping of 16TB usually takes a bit long.
     setlim(RLIMIT_CORE, 0);
   }
-  bool reexec = false;
-  // TSan doesn't play well with unlimited stack size (as stack
-  // overlaps with shadow memory). If we detect unlimited stack size,
-  // we re-exec the program with limited stack size as a best effort.
-  if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
-    const uptr kMaxStackSize = 32 * 1024 * 1024;
-    Report("WARNING: Program is run with unlimited stack size, which "
-           "wouldn't work with ThreadSanitizer.\n");
-    Report("Re-execing with stack size limited to %zd bytes.\n", kMaxStackSize);
-    SetStackSizeLimitInBytes(kMaxStackSize);
-    reexec = true;
-  }

-  if (getlim(RLIMIT_AS) != (rlim_t)-1) {
-    Report("WARNING: Program is run with limited virtual address space, which "
-           "wouldn't work with ThreadSanitizer.\n");
-    Report("Re-execing with unlimited virtual address space.\n");
-    setlim(RLIMIT_AS, -1);
-    reexec = true;
-  }
+  // Go maps shadow memory lazily and works fine with limited address space.
+  // Unlimited stack is not a problem as well, because the executable
+  // is not compiled with -pie.
+  if (kCppMode) {
+    bool reexec = false;
+    // TSan doesn't play well with unlimited stack size (as stack
+    // overlaps with shadow memory). If we detect unlimited stack size,
+    // we re-exec the program with limited stack size as a best effort.
+    if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
+      const uptr kMaxStackSize = 32 * 1024 * 1024;
+      Report("WARNING: Program is run with unlimited stack size, which "
+             "wouldn't work with ThreadSanitizer.\n");
+      Report("Re-execing with stack size limited to %zd bytes.\n",
+             kMaxStackSize);
+      SetStackSizeLimitInBytes(kMaxStackSize);
+      reexec = true;
+    }

-  if (reexec)
-    ReExec();
+    if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+      Report("WARNING: Program is run with limited virtual address space,"
+             " which wouldn't work with ThreadSanitizer.\n");
+      Report("Re-execing with unlimited virtual address space.\n");
+      setlim(RLIMIT_AS, -1);
+      reexec = true;
+    }
+    if (reexec)
+      ReExec();
+  }

 #ifndef TSAN_GO
   CheckPIE();
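InitTlsSize no longer declares _dl_get_tls_static_info as a weak extern; it always resolves the symbol at runtime with dlsym(RTLD_NEXT, ...) and memcpy's the object pointer into a function pointer to sidestep the object-to-function cast. The pattern in isolation; note this is a glibc-internal symbol that may be absent on other libcs, and the sketch assumes x86-64, where the internal calling convention matches a plain call:

// tls_lookup.cc (hypothetical) -- build: g++ tls_lookup.cc -ldl
#include <dlfcn.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

typedef void (*get_tls_func)(size_t*, size_t*);

int main() {
  void *p = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
  if (p == 0)
    return 1;                        // symbol absent: not glibc, or renamed
  get_tls_func get_tls;
  memcpy(&get_tls, &p, sizeof(p));   // avoid object-to-function pointer cast
  size_t tls_size = 0, tls_align = 0;
  get_tls(&tls_size, &tls_align);
  printf("static TLS: %zu bytes, align %zu\n", tls_size, tls_align);
  return 0;
}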
@@ -23,12 +23,24 @@ ReportDesc::ReportDesc()
     , sleep() {
 }

+ReportMop::ReportMop()
+    : mset(MBlockReportMutex) {
+}
+
 ReportDesc::~ReportDesc() {
   // FIXME(dvyukov): it must be leaking a lot of memory.
 }

 #ifndef TSAN_GO

+const int kThreadBufSize = 32;
+const char *thread_name(char *buf, int tid) {
+  if (tid == 0)
+    return "main thread";
+  internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
+  return buf;
+}
+
 static void PrintHeader(ReportType typ) {
   Printf("WARNING: ThreadSanitizer: ");

@@ -65,52 +77,69 @@ void PrintStack(const ReportStack *ent) {
   Printf("\n");
 }

+static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
+  for (uptr i = 0; i < mset.Size(); i++) {
+    if (i == 0)
+      Printf(" (mutexes:");
+    const ReportMopMutex m = mset[i];
+    Printf(" %s M%llu", m.write ? "write" : "read", m.id);
+    Printf(i == mset.Size() - 1 ? ")" : ",");
+  }
+}
+
 static void PrintMop(const ReportMop *mop, bool first) {
-  Printf("  %s of size %d at %p",
+  char thrbuf[kThreadBufSize];
+  Printf("  %s of size %d at %p by %s",
       (first ? (mop->write ? "Write" : "Read")
              : (mop->write ? "Previous write" : "Previous read")),
-      mop->size, (void*)mop->addr);
-  if (mop->tid == 0)
-    Printf(" by main thread:\n");
-  else
-    Printf(" by thread %d:\n", mop->tid);
+      mop->size, (void*)mop->addr,
+      thread_name(thrbuf, mop->tid));
+  PrintMutexSet(mop->mset);
+  Printf(":\n");
   PrintStack(mop->stack);
 }

 static void PrintLocation(const ReportLocation *loc) {
+  char thrbuf[kThreadBufSize];
   if (loc->type == ReportLocationGlobal) {
     Printf("  Location is global '%s' of size %zu at %zx %s:%d (%s+%p)\n\n",
         loc->name, loc->size, loc->addr, loc->file, loc->line,
         loc->module, loc->offset);
   } else if (loc->type == ReportLocationHeap) {
-    Printf("  Location is heap block of size %zu at %p allocated",
-        loc->size, loc->addr);
-    if (loc->tid == 0)
-      Printf(" by main thread:\n");
-    else
-      Printf(" by thread %d:\n", loc->tid);
+    char thrbuf[kThreadBufSize];
+    Printf("  Location is heap block of size %zu at %p allocated by %s:\n",
        loc->size, loc->addr, thread_name(thrbuf, loc->tid));
     PrintStack(loc->stack);
   } else if (loc->type == ReportLocationStack) {
-    Printf("  Location is stack of thread %d:\n\n", loc->tid);
+    Printf("  Location is stack of %s\n\n", thread_name(thrbuf, loc->tid));
+  } else if (loc->type == ReportLocationFD) {
+    Printf("  Location is file descriptor %d created by %s at:\n",
+        loc->fd, thread_name(thrbuf, loc->tid));
+    PrintStack(loc->stack);
   }
 }

 static void PrintMutex(const ReportMutex *rm) {
-  if (rm->stack == 0)
-    return;
-  Printf("  Mutex %d created at:\n", rm->id);
-  PrintStack(rm->stack);
+  if (rm->destroyed) {
+    Printf("  Mutex M%llu is already destroyed.\n\n", rm->id);
+  } else {
+    Printf("  Mutex M%llu created at:\n", rm->id);
+    PrintStack(rm->stack);
+  }
 }

 static void PrintThread(const ReportThread *rt) {
   if (rt->id == 0)  // Little sense in describing the main thread.
     return;
-  Printf("  Thread %d", rt->id);
+  Printf("  Thread T%d", rt->id);
   if (rt->name)
     Printf(" '%s'", rt->name);
-  Printf(" (tid=%zu, %s)", rt->pid, rt->running ? "running" : "finished");
+  char thrbuf[kThreadBufSize];
+  Printf(" (tid=%zu, %s) created by %s",
+      rt->pid, rt->running ? "running" : "finished",
+      thread_name(thrbuf, rt->parent_tid));
   if (rt->stack)
-    Printf(" created at:");
+    Printf(" at:");
   Printf("\n");
   PrintStack(rt->stack);
 }
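With thread_name() and PrintMutexSet() in place, the access lines of a race report name the accessing thread and the mutexes it held at the time of the access. Given the format strings above, an access line now reads like the following (the addresses and ids are hypothetical):

  Write of size 8 at 0x7d040000f000 by thread T1 (mutexes: write M2, read M5):
  Previous read of size 8 at 0x7d040000f000 by main thread: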
@@ -36,20 +36,27 @@ struct ReportStack {
   int col;
 };

+struct ReportMopMutex {
+  u64 id;
+  bool write;
+};
+
 struct ReportMop {
   int tid;
   uptr addr;
   int size;
   bool write;
-  int nmutex;
-  int *mutex;
+  Vector<ReportMopMutex> mset;
   ReportStack *stack;
+
+  ReportMop();
 };

 enum ReportLocationType {
   ReportLocationGlobal,
   ReportLocationHeap,
-  ReportLocationStack
+  ReportLocationStack,
+  ReportLocationFD
 };

 struct ReportLocation {

@@ -59,6 +66,7 @@ struct ReportLocation {
   char *module;
   uptr offset;
   int tid;
+  int fd;
   char *name;
   char *file;
   int line;

@@ -70,11 +78,13 @@ struct ReportThread {
   uptr pid;
   bool running;
   char *name;
+  int parent_tid;
   ReportStack *stack;
 };

 struct ReportMutex {
-  int id;
+  u64 id;
+  bool destroyed;
   ReportStack *stack;
 };
@@ -164,6 +164,16 @@ void MapShadow(uptr addr, uptr size) {
   MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
 }

+void MapThreadTrace(uptr addr, uptr size) {
+  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
+  CHECK_GE(addr, kTraceMemBegin);
+  CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+  if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
+    Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
+    Die();
+  }
+}
+
 void Initialize(ThreadState *thr) {
   // Thread safe because done before all threads exist.
   static bool is_initialized = false;

@@ -289,6 +299,7 @@ void TraceSwitch(ThreadState *thr) {
   TraceHeader *hdr = &thr->trace.headers[trace];
   hdr->epoch0 = thr->fast_state.epoch();
   hdr->stack0.ObtainCurrent(thr, 0);
+  hdr->mset0 = thr->mset;
   thr->nomalloc--;
 }

@@ -443,7 +454,7 @@ ALWAYS_INLINE
 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
     int kAccessSizeLog, bool kAccessIsWrite) {
   u64 *shadow_mem = (u64*)MemToShadow(addr);
-  DPrintf2("#%d: tsan::OnMemoryAccess: @%p %p size=%d"
+  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
       " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
       (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
       (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
@ -34,6 +34,7 @@
|
||||||
#include "tsan_vector.h"
|
#include "tsan_vector.h"
|
||||||
#include "tsan_report.h"
|
#include "tsan_report.h"
|
||||||
#include "tsan_platform.h"
|
#include "tsan_platform.h"
|
||||||
|
#include "tsan_mutexset.h"
|
||||||
|
|
||||||
#if SANITIZER_WORDSIZE != 64
|
#if SANITIZER_WORDSIZE != 64
|
||||||
# error "ThreadSanitizer is supported only on 64-bit platforms"
|
# error "ThreadSanitizer is supported only on 64-bit platforms"
|
||||||
|
|
@ -48,6 +49,10 @@ struct MBlock {
|
||||||
u32 alloc_tid;
|
u32 alloc_tid;
|
||||||
u32 alloc_stack_id;
|
u32 alloc_stack_id;
|
||||||
SyncVar *head;
|
SyncVar *head;
|
||||||
|
|
||||||
|
MBlock()
|
||||||
|
: mtx(MutexTypeMBlock, StatMtxMBlock) {
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
#ifndef TSAN_GO
|
#ifndef TSAN_GO
|
||||||
|
|
@ -58,10 +63,22 @@ const uptr kAllocatorSpace = 0x7d0000000000ULL;
|
||||||
#endif
|
#endif
|
||||||
const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
|
const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
|
||||||
|
|
||||||
|
struct TsanMapUnmapCallback {
|
||||||
|
void OnMap(uptr p, uptr size) const { }
|
||||||
|
void OnUnmap(uptr p, uptr size) const {
|
||||||
|
// We are about to unmap a chunk of user memory.
|
||||||
|
// Mark the corresponding shadow memory as not needed.
|
||||||
|
uptr shadow_beg = MemToShadow(p);
|
||||||
|
uptr shadow_end = MemToShadow(p + size);
|
||||||
|
CHECK(IsAligned(shadow_end|shadow_beg, GetPageSizeCached()));
|
||||||
|
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
|
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
|
||||||
DefaultSizeClassMap> PrimaryAllocator;
|
DefaultSizeClassMap> PrimaryAllocator;
|
||||||
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
|
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
|
||||||
typedef LargeMmapAllocator SecondaryAllocator;
|
typedef LargeMmapAllocator<TsanMapUnmapCallback> SecondaryAllocator;
|
||||||
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
|
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
|
||||||
SecondaryAllocator> Allocator;
|
SecondaryAllocator> Allocator;
|
||||||
Allocator *allocator();
|
Allocator *allocator();
|
||||||
|
|
@@ -298,6 +315,7 @@ struct ThreadState {
   uptr *shadow_stack;
   uptr *shadow_stack_end;
 #endif
+  MutexSet mset;
   ThreadClock clock;
 #ifndef TSAN_GO
   AllocatorCache alloc_cache;

@@ -369,6 +387,7 @@ struct ThreadContext {
   u64 epoch0;
   u64 epoch1;
   StackTrace creation_stack;
+  int creation_tid;
   ThreadDeadInfo *dead_info;
   ThreadContext *dead_next;  // In dead thread list.
   char *name;  // As annotated by user.

@@ -445,7 +464,8 @@ class ScopedReport {
   ~ScopedReport();

   void AddStack(const StackTrace *stack);
-  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack);
+  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
+                       const MutexSet *mset);
   void AddThread(const ThreadContext *tctx);
   void AddMutex(const SyncVar *s);
   void AddLocation(uptr addr, uptr size);

@@ -457,11 +477,13 @@ class ScopedReport {
   Context *ctx_;
   ReportDesc *rep_;

+  void AddMutex(u64 id);
+
   ScopedReport(const ScopedReport&);
   void operator = (const ScopedReport&);
 };

-void RestoreStack(int tid, const u64 epoch, StackTrace *stk);
+void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);

 void StatAggregate(u64 *dst, u64 *src);
 void StatOutput(u64 *stat);

@@ -471,6 +493,7 @@ void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
 }

 void MapShadow(uptr addr, uptr size);
+void MapThreadTrace(uptr addr, uptr size);
 void InitializeShadowMemory();
 void InitializeInterceptors();
 void InitializeDynamicAnnotations();

@@ -502,6 +525,10 @@ void PrintCurrentStack(ThreadState *thr, uptr pc);
 void Initialize(ThreadState *thr);
 int Finalize(ThreadState *thr);

+SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
+                     bool write_lock, bool create);
+SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
+
 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                   int kAccessSizeLog, bool kAccessIsWrite);
 void MemoryAccessImpl(ThreadState *thr, uptr addr,

@@ -575,7 +602,10 @@ uptr TraceParts();

 extern "C" void __tsan_trace_switch();
 void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs,
-                                        EventType typ, uptr addr) {
+                                        EventType typ, u64 addr) {
+  DCHECK_GE((int)typ, 0);
+  DCHECK_LE((int)typ, 7);
+  DCHECK_EQ(GetLsb(addr, 61), addr);
   StatInc(thr, StatEvents);
   u64 pos = fs.GetTracePos();
   if (UNLIKELY((pos % kTracePartSize) == 0)) {
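The three new DCHECKs pin down the trace event layout: the type must fit in 3 bits (0..7) and the payload in the remaining 61 bits, which matches the decoder in RestoreStack further down (ev >> 61 for the type, low 61 bits for the payload). A self-contained sketch of that encoding, with illustrative helper names:

    #include <cassert>
    #include <cstdint>

    typedef uint64_t u64;

    u64 EncodeEvent(int typ, u64 payload) {
      assert(typ >= 0 && typ <= 7);                        // 3-bit type
      assert(payload == (payload & (((u64)1 << 61) - 1))); // 61-bit payload
      return ((u64)typ << 61) | payload;
    }

    int EventTypeOf(u64 ev)    { return (int)(ev >> 61); }
    u64 EventPayloadOf(u64 ev) { return ev & (((u64)1 << 61) - 1); }

    int main() {
      u64 ev = EncodeEvent(5, 0x123456);  // round-trips exactly
      assert(EventTypeOf(ev) == 5 && EventPayloadOf(ev) == 0x123456);
    }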
@@ -26,7 +26,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
   StatInc(thr, StatMutexCreate);
   if (!linker_init && IsAppMem(addr))
     MemoryWrite1Byte(thr, pc, addr);
-  SyncVar *s = ctx->synctab.GetAndLock(thr, pc, addr, true);
+  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   s->is_rw = rw;
   s->is_recursive = recursive;
   s->is_linker_init = linker_init;

@@ -59,11 +59,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
     trace.ObtainCurrent(thr, pc);
     rep.AddStack(&trace);
     FastState last(s->last_lock);
-    RestoreStack(last.tid(), last.epoch(), &trace);
+    RestoreStack(last.tid(), last.epoch(), &trace, 0);
     rep.AddStack(&trace);
     rep.AddLocation(s->addr, 1);
     OutputReport(ctx, rep);
   }
+  thr->mset.Remove(s->GetId());
   DestroyAndFree(s);
 }

@@ -72,9 +73,9 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexLock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeLock, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
   if (s->owner_tid == SyncVar::kInvalidTid) {
     CHECK_EQ(s->recursion, 0);
     s->owner_tid = thr->tid;

@@ -96,6 +97,7 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
     StatInc(thr, StatMutexRecLock);
   }
   s->recursion++;
+  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
   s->mtx.Unlock();
 }

@@ -104,9 +106,9 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
   if (s->recursion == 0) {
     if (!s->is_broken) {
       s->is_broken = true;

@@ -132,6 +134,7 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
       StatInc(thr, StatMutexRecUnlock);
     }
   }
+  thr->mset.Del(s->GetId(), true);
   s->mtx.Unlock();
 }

@@ -141,9 +144,9 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
   StatInc(thr, StatMutexReadLock);
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
+  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
   if (s->owner_tid != SyncVar::kInvalidTid) {
     Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
     PrintCurrentStack(thr, pc);

@@ -152,6 +155,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
   thr->clock.acquire(&s->clock);
   s->last_lock = thr->fast_state.raw();
   StatInc(thr, StatSyncAcquire);
+  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
   s->mtx.ReadUnlock();
 }

@@ -161,9 +165,9 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   StatInc(thr, StatMutexReadUnlock);
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->fast_state.IncrementEpoch();
-  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
   if (s->owner_tid != SyncVar::kInvalidTid) {
     Printf("ThreadSanitizer WARNING: read unlock of a write "
            "locked mutex\n");

@@ -174,6 +178,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   thr->clock.release(&s->read_clock);
   StatInc(thr, StatSyncRelease);
   s->mtx.Unlock();
+  thr->mset.Del(s->GetId(), false);
 }

 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {

@@ -181,18 +186,22 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryRead1Byte(thr, pc, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+  bool write = true;
   if (s->owner_tid == SyncVar::kInvalidTid) {
     // Seems to be read unlock.
+    write = false;
     StatInc(thr, StatMutexReadUnlock);
     thr->fast_state.IncrementEpoch();
-    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
+    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
     thr->clock.set(thr->tid, thr->fast_state.epoch());
     thr->fast_synch_epoch = thr->fast_state.epoch();
     thr->clock.release(&s->read_clock);
     StatInc(thr, StatSyncRelease);
   } else if (s->owner_tid == thr->tid) {
     // Seems to be write unlock.
+    thr->fast_state.IncrementEpoch();
+    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
     CHECK_GT(s->recursion, 0);
     s->recursion--;
     if (s->recursion == 0) {
@@ -202,8 +211,6 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
       // The sequence of events is quite tricky and doubled in several places.
       // First, it's a bug to increment the epoch w/o writing to the trace.
      // Then, the acquire/release logic can be factored out as well.
-      thr->fast_state.IncrementEpoch();
-      TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
       thr->clock.set(thr->tid, thr->fast_state.epoch());
       thr->fast_synch_epoch = thr->fast_state.epoch();
       thr->clock.ReleaseStore(&s->clock);
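The two deleted lines were duplicated work: the write-unlock path now increments the epoch and traces EventTypeUnlock before the recursion check (see the previous hunk). The comment records the invariant that makes the duplication dangerous: advancing the epoch without appending a trace event desynchronizes replay. One way to see the invariant is to force both steps through a single helper; a toy sketch with illustrative types:

    #include <cstdint>
    #include <vector>

    typedef uint64_t u64;

    struct ToyThread {
      u64 epoch;
      std::vector<u64> trace;
    };

    // The only way to advance the epoch also appends a trace event, so the
    // "increment w/o writing to the trace" bug cannot be expressed at all.
    void AdvanceEpochAndTrace(ToyThread *thr, u64 event) {
      thr->epoch++;
      thr->trace.push_back(event);
    }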
@@ -216,13 +223,14 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
     Printf("ThreadSanitizer WARNING: mutex unlock by another thread\n");
     PrintCurrentStack(thr, pc);
   }
+  thr->mset.Del(s->GetId(), write);
   s->mtx.Unlock();
 }

 void Acquire(ThreadState *thr, uptr pc, uptr addr) {
   CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   thr->clock.acquire(&s->clock);
   StatInc(thr, StatSyncAcquire);

@@ -246,7 +254,7 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
 void Release(ThreadState *thr, uptr pc, uptr addr) {
   CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: Release %zx\n", thr->tid, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   thr->clock.release(&s->clock);
   StatInc(thr, StatSyncRelease);

@@ -256,7 +264,7 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
 void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
   CHECK_GT(thr->in_rtl, 0);
   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
-  SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
+  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
   thr->clock.set(thr->tid, thr->fast_state.epoch());
   thr->clock.ReleaseStore(&s->clock);
   StatInc(thr, StatSyncRelease);
@@ -12,6 +12,7 @@
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_common.h"
 #include "tsan_platform.h"
 #include "tsan_rtl.h"
 #include "tsan_suppressions.h"

@@ -20,9 +21,12 @@
 #include "tsan_sync.h"
 #include "tsan_mman.h"
 #include "tsan_flags.h"
+#include "tsan_fd.h"

 namespace __tsan {

+using namespace __sanitizer;  // NOLINT
+
 void TsanCheckFailed(const char *file, int line, const char *cond,
                      u64 v1, u64 v2) {
   ScopedInRtl in_rtl;

@@ -132,7 +136,7 @@ void ScopedReport::AddStack(const StackTrace *stack) {
 }

 void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
-                                   const StackTrace *stack) {
+                                   const StackTrace *stack, const MutexSet *mset) {
   void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
   ReportMop *mop = new(mem) ReportMop;
   rep_->mops.PushBack(mop);
@@ -140,8 +144,27 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
   mop->addr = addr + s.addr0();
   mop->size = s.size();
   mop->write = s.is_write();
-  mop->nmutex = 0;
   mop->stack = SymbolizeStack(*stack);
+  for (uptr i = 0; i < mset->Size(); i++) {
+    MutexSet::Desc d = mset->Get(i);
+    u64 uid = 0;
+    uptr addr = SyncVar::SplitId(d.id, &uid);
+    SyncVar *s = ctx_->synctab.GetIfExistsAndLock(addr, false);
+    // Check that the mutex is still alive.
+    // Another mutex can be created at the same address,
+    // so check uid as well.
+    if (s && s->CheckId(uid)) {
+      ReportMopMutex mtx = {s->uid, d.write};
+      mop->mset.PushBack(mtx);
+      AddMutex(s);
+    } else {
+      ReportMopMutex mtx = {d.id, d.write};
+      mop->mset.PushBack(mtx);
+      AddMutex(d.id);
+    }
+    if (s)
+      s->mtx.ReadUnlock();
+  }
 }

 void ScopedReport::AddThread(const ThreadContext *tctx) {
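The loop above resolves each mutex id recorded in the access's MutexSet back to a live SyncVar, and the comment explains the subtlety: a mutex can die and a different one can be created at the same address, so the id also carries a uid that is compared via CheckId. A toy model of that staleness check (all names illustrative):

    #include <cassert>
    #include <cstdint>
    #include <map>

    typedef uint64_t u64;
    typedef uintptr_t uptr;

    struct ToySync { u64 uid; };
    static std::map<uptr, ToySync> table;   // stand-in for ctx_->synctab
    static u64 uid_gen;

    void Create(uptr addr)  { table[addr].uid = uid_gen++; }
    void Destroy(uptr addr) { table.erase(addr); }

    // Report-side check: a sync object still lives at the address AND its
    // (truncated) uid matches the uid recorded in the access's mutex set.
    bool CheckId(uptr addr, u64 recorded_uid) {
      std::map<uptr, ToySync>::iterator it = table.find(addr);
      return it != table.end() &&
             (it->second.uid & ((1ull << 14) - 1)) == recorded_uid;
    }

    int main() {
      Create(0x1000);
      u64 recorded = table[0x1000].uid & ((1ull << 14) - 1);
      Destroy(0x1000);
      Create(0x1000);                       // new mutex, same address
      assert(!CheckId(0x1000, recorded));   // stale id is rejected
    }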
@@ -156,6 +179,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
   rt->pid = tctx->os_id;
   rt->running = (tctx->status == ThreadStatusRunning);
   rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
+  rt->parent_tid = tctx->creation_tid;
   rt->stack = SymbolizeStack(tctx->creation_stack);
 }

@@ -173,17 +197,58 @@ static ThreadContext *FindThread(int unique_id) {
 #endif

 void ScopedReport::AddMutex(const SyncVar *s) {
+  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+    if (rep_->mutexes[i]->id == s->uid)
+      return;
+  }
   void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
   ReportMutex *rm = new(mem) ReportMutex();
   rep_->mutexes.PushBack(rm);
-  rm->id = 42;
+  rm->id = s->uid;
+  rm->destroyed = false;
   rm->stack = SymbolizeStack(s->creation_stack);
 }

+void ScopedReport::AddMutex(u64 id) {
+  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+    if (rep_->mutexes[i]->id == id)
+      return;
+  }
+  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
+  ReportMutex *rm = new(mem) ReportMutex();
+  rep_->mutexes.PushBack(rm);
+  rm->id = id;
+  rm->destroyed = true;
+  rm->stack = 0;
+}
+
 void ScopedReport::AddLocation(uptr addr, uptr size) {
   if (addr == 0)
     return;
 #ifndef TSAN_GO
+  int fd = -1;
+  int creat_tid = -1;
+  u32 creat_stack = 0;
+  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
+      || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
+    void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
+    ReportLocation *loc = new(mem) ReportLocation();
+    rep_->locs.PushBack(loc);
+    loc->type = ReportLocationFD;
+    loc->fd = fd;
+    loc->tid = creat_tid;
+    uptr ssz = 0;
+    const uptr *stack = StackDepotGet(creat_stack, &ssz);
+    if (stack) {
+      StackTrace trace;
+      trace.Init(stack, ssz);
+      loc->stack = SymbolizeStack(trace);
+    }
+    ThreadContext *tctx = FindThread(creat_tid);
+    if (tctx)
+      AddThread(tctx);
+    return;
+  }
   if (allocator()->PointerIsMine((void*)addr)) {
     MBlock *b = user_mblock(0, (void*)addr);
     ThreadContext *tctx = FindThread(b->alloc_tid);
@@ -246,7 +311,10 @@ const ReportDesc *ScopedReport::GetReport() const {
   return rep_;
 }

-void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
+void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
+  // This function restores stack trace and mutex set for the thread/epoch.
+  // It does so by getting stack trace and mutex set at the beginning of
+  // trace part, and then replaying the trace till the given epoch.
   ThreadContext *tctx = CTX()->threads[tid];
   if (tctx == 0)
     return;

@@ -267,6 +335,7 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
   TraceHeader* hdr = &trace->headers[partidx];
   if (epoch < hdr->epoch0)
     return;
+  const u64 epoch0 = RoundDown(epoch, TraceSize());
   const u64 eend = epoch % TraceSize();
   const u64 ebegin = RoundDown(eend, kTracePartSize);
   DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",

@@ -276,12 +345,14 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
     stack[i] = hdr->stack0.Get(i);
     DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
   }
+  if (mset)
+    *mset = hdr->mset0;
   uptr pos = hdr->stack0.Size();
   Event *events = (Event*)GetThreadTrace(tid);
   for (uptr i = ebegin; i <= eend; i++) {
     Event ev = events[i];
     EventType typ = (EventType)(ev >> 61);
-    uptr pc = (uptr)(ev & 0xffffffffffffull);
+    uptr pc = (uptr)(ev & ((1ull << 61) - 1));
     DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
     if (typ == EventTypeMop) {
       stack[pos] = pc;
@@ -291,6 +362,17 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
       if (pos > 0)
         pos--;
     }
+    if (mset) {
+      if (typ == EventTypeLock) {
+        mset->Add(pc, true, epoch0 + i);
+      } else if (typ == EventTypeUnlock) {
+        mset->Del(pc, true);
+      } else if (typ == EventTypeRLock) {
+        mset->Add(pc, false, epoch0 + i);
+      } else if (typ == EventTypeRUnlock) {
+        mset->Del(pc, false);
+      }
+    }
     for (uptr j = 0; j <= pos; j++)
       DPrintf2("  #%zu: %zx\n", j, stack[j]);
   }
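This is the replay half of the new RestoreStack contract: the mutex set is seeded from the snapshot in the trace-part header (hdr->mset0, two hunks up), then each Lock/Unlock/RLock/RUnlock event up to the target epoch is applied to it. A reduced sketch of the same replay, with illustrative types:

    #include <cstdint>
    #include <set>
    #include <vector>

    typedef uint64_t u64;

    enum ToyEventType { kLock, kUnlock, kMop };
    struct ToyEvent { ToyEventType typ; u64 mutex_id; };

    // snapshot plays the role of hdr->mset0; eend is the target epoch
    // relative to the start of the trace part.
    std::set<u64> ReplayMutexSet(std::set<u64> snapshot,
                                 const std::vector<ToyEvent> &events,
                                 size_t eend) {
      for (size_t i = 0; i < events.size() && i <= eend; i++) {
        if (events[i].typ == kLock)
          snapshot.insert(events[i].mutex_id);
        else if (events[i].typ == kUnlock)
          snapshot.erase(events[i].mutex_id);
      }
      return snapshot;  // the mutex set held at epoch eend
    }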
@@ -400,8 +482,11 @@ static bool IsJavaNonsense(const ReportDesc *rep) {
     if (frame != 0 && frame->func != 0
         && (internal_strcmp(frame->func, "memset") == 0
             || internal_strcmp(frame->func, "memcpy") == 0
+            || internal_strcmp(frame->func, "memmove") == 0
             || internal_strcmp(frame->func, "strcmp") == 0
             || internal_strcmp(frame->func, "strncpy") == 0
+            || internal_strcmp(frame->func, "strlen") == 0
+            || internal_strcmp(frame->func, "free") == 0
             || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) {
       frame = frame->next;
       if (frame == 0

@@ -423,6 +508,10 @@ void ReportRace(ThreadState *thr) {
     return;
   ScopedInRtl in_rtl;

+  if (thr->in_signal_handler)
+    Printf("ThreadSanitizer: printing report from signal handler."
+           " Can crash or hang.\n");
+
   bool freed = false;
   {
     Shadow s(thr->racy_state[1]);

@@ -454,15 +543,18 @@ void ReportRace(ThreadState *thr) {
   traces[0].ObtainCurrent(thr, toppc);
   if (IsFiredSuppression(ctx, rep, traces[0]))
     return;
+  InternalScopedBuffer<MutexSet> mset2(1);
+  new(mset2.data()) MutexSet();
   Shadow s2(thr->racy_state[1]);
-  RestoreStack(s2.tid(), s2.epoch(), &traces[1]);
+  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());

   if (HandleRacyStacks(thr, traces, addr_min, addr_max))
     return;

   for (uptr i = 0; i < kMop; i++) {
     Shadow s(thr->racy_state[i]);
-    rep.AddMemoryAccess(addr, s, &traces[i]);
+    rep.AddMemoryAccess(addr, s, &traces[i],
+                        i == 0 ? &thr->mset : mset2.data());
   }

   if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
@@ -154,6 +154,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
     thr->clock.release(&tctx->sync);
     StatInc(thr, StatSyncRelease);
     tctx->creation_stack.ObtainCurrent(thr, pc);
+    tctx->creation_tid = thr->tid;
   }
   return tid;
 }

@@ -303,6 +304,7 @@ void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
     Printf("ThreadSanitizer: join of non-existent thread\n");
     return;
   }
+  // FIXME(dvyukov): print message and continue (it's user error).
   CHECK_EQ(tctx->detached, false);
   CHECK_EQ(tctx->status, ThreadStatusFinished);
   thr->clock.acquire(&tctx->sync);
@@ -179,6 +179,28 @@ void StatOutput(u64 *stat) {
   name[StatInt_sem_timedwait] = " sem_timedwait ";
   name[StatInt_sem_post] = " sem_post ";
   name[StatInt_sem_getvalue] = " sem_getvalue ";
+  name[StatInt_open] = " open ";
+  name[StatInt_open64] = " open64 ";
+  name[StatInt_creat] = " creat ";
+  name[StatInt_creat64] = " creat64 ";
+  name[StatInt_dup] = " dup ";
+  name[StatInt_dup2] = " dup2 ";
+  name[StatInt_dup3] = " dup3 ";
+  name[StatInt_eventfd] = " eventfd ";
+  name[StatInt_signalfd] = " signalfd ";
+  name[StatInt_inotify_init] = " inotify_init ";
+  name[StatInt_inotify_init1] = " inotify_init1 ";
+  name[StatInt_socket] = " socket ";
+  name[StatInt_socketpair] = " socketpair ";
+  name[StatInt_connect] = " connect ";
+  name[StatInt_accept] = " accept ";
+  name[StatInt_accept4] = " accept4 ";
+  name[StatInt_epoll_create] = " epoll_create ";
+  name[StatInt_epoll_create1] = " epoll_create1 ";
+  name[StatInt_close] = " close ";
+  name[StatInt___close] = " __close ";
+  name[StatInt_pipe] = " pipe ";
+  name[StatInt_pipe2] = " pipe2 ";
   name[StatInt_read] = " read ";
   name[StatInt_pread] = " pread ";
   name[StatInt_pread64] = " pread64 ";

@@ -195,6 +217,8 @@ void StatOutput(u64 *stat) {
   name[StatInt_recvmsg] = " recvmsg ";
   name[StatInt_unlink] = " unlink ";
   name[StatInt_fopen] = " fopen ";
+  name[StatInt_freopen] = " freopen ";
+  name[StatInt_fclose] = " fclose ";
   name[StatInt_fread] = " fread ";
   name[StatInt_fwrite] = " fwrite ";
   name[StatInt_puts] = " puts ";

@@ -208,6 +232,7 @@ void StatOutput(u64 *stat) {
   name[StatInt_usleep] = " usleep ";
   name[StatInt_nanosleep] = " nanosleep ";
   name[StatInt_gettimeofday] = " gettimeofday ";
+  name[StatInt_fork] = " fork ";

   name[StatAnnotation] = "Dynamic annotations ";
   name[StatAnnotateHappensBefore] = " HappensBefore ";

@@ -251,6 +276,8 @@ void StatOutput(u64 *stat) {
   name[StatMtxSlab] = " Slab ";
   name[StatMtxAtExit] = " Atexit ";
   name[StatMtxAnnotations] = " Annotations ";
+  name[StatMtxMBlock] = " MBlock ";
+  name[StatMtxJavaMBlock] = " JavaMBlock ";

   Printf("Statistics:\n");
   for (int i = 0; i < StatCnt; i++)
@@ -174,6 +174,28 @@ enum StatType {
   StatInt_sem_timedwait,
   StatInt_sem_post,
   StatInt_sem_getvalue,
+  StatInt_open,
+  StatInt_open64,
+  StatInt_creat,
+  StatInt_creat64,
+  StatInt_dup,
+  StatInt_dup2,
+  StatInt_dup3,
+  StatInt_eventfd,
+  StatInt_signalfd,
+  StatInt_inotify_init,
+  StatInt_inotify_init1,
+  StatInt_socket,
+  StatInt_socketpair,
+  StatInt_connect,
+  StatInt_accept,
+  StatInt_accept4,
+  StatInt_epoll_create,
+  StatInt_epoll_create1,
+  StatInt_close,
+  StatInt___close,
+  StatInt_pipe,
+  StatInt_pipe2,
   StatInt_read,
   StatInt_pread,
   StatInt_pread64,

@@ -190,6 +212,8 @@ enum StatType {
   StatInt_recvmsg,
   StatInt_unlink,
   StatInt_fopen,
+  StatInt_freopen,
+  StatInt_fclose,
   StatInt_fread,
   StatInt_fwrite,
   StatInt_puts,

@@ -207,6 +231,7 @@ enum StatType {
   StatInt_usleep,
   StatInt_nanosleep,
   StatInt_gettimeofday,
+  StatInt_fork,

   // Dynamic annotations.
   StatAnnotation,

@@ -253,6 +278,8 @@ enum StatType {
   StatMtxSlab,
   StatMtxAnnotations,
   StatMtxAtExit,
+  StatMtxMBlock,
+  StatMtxJavaMBlock,

   // This must be the last.
   StatCnt
@@ -102,11 +102,11 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
   m->base = (uptr)info->dlpi_addr;
   m->inp_fd = -1;
   m->out_fd = -1;
-  DPrintf("Module %s %zx\n", m->name, m->base);
+  DPrintf2("Module %s %zx\n", m->name, m->base);
   for (int i = 0; i < info->dlpi_phnum; i++) {
     const Elf64_Phdr *s = &info->dlpi_phdr[i];
-    DPrintf(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx"
+    DPrintf2(" Section p_type=%zx p_offset=%zx p_vaddr=%zx p_paddr=%zx"
             " p_filesz=%zx p_memsz=%zx p_flags=%zx p_align=%zx\n",
             (uptr)s->p_type, (uptr)s->p_offset, (uptr)s->p_vaddr,
             (uptr)s->p_paddr, (uptr)s->p_filesz, (uptr)s->p_memsz,
             (uptr)s->p_flags, (uptr)s->p_align);

@@ -119,7 +119,7 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
     sec->end = sec->base + s->p_memsz;
     sec->next = ctx->sections;
     ctx->sections = sec;
-    DPrintf(" Section %zx-%zx\n", sec->base, sec->end);
+    DPrintf2(" Section %zx-%zx\n", sec->base, sec->end);
   }
   return 0;
 }
@@ -15,9 +15,10 @@

 namespace __tsan {

-SyncVar::SyncVar(uptr addr)
+SyncVar::SyncVar(uptr addr, u64 uid)
   : mtx(MutexTypeSyncVar, StatMtxSyncVar)
   , addr(addr)
+  , uid(uid)
   , owner_tid(kInvalidTid)
   , last_lock()
   , recursion()

@@ -45,9 +46,38 @@ SyncTab::~SyncTab() {
   }
 }

-SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
+SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
                              uptr addr, bool write_lock) {
+  return GetAndLock(thr, pc, addr, write_lock, true);
+}
+
+SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
+  return GetAndLock(0, 0, addr, write_lock, false);
+}
+
+SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
+  StatInc(thr, StatSyncCreated);
+  void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
+  const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+  SyncVar *res = new(mem) SyncVar(addr, uid);
 #ifndef TSAN_GO
+  res->creation_stack.ObtainCurrent(thr, pc);
+#endif
+  return res;
+}
+
+SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
+                             uptr addr, bool write_lock, bool create) {
+#ifndef TSAN_GO
+  { // NOLINT
+    SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
+    if (res)
+      return res;
+  }
+
+  // Here we ask only PrimaryAllocator, because
+  // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
+  // the hashmap anyway.
   if (PrimaryAllocator::PointerIsMine((void*)addr)) {
     MBlock *b = user_mblock(thr, (void*)addr);
     Lock l(&b->mtx);
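The refactor above splits the old public GetAndLock into one private workhorse with a create flag plus two intention-revealing wrappers, and hoists SyncVar construction (including the new uid drawn from uid_gen_) into Create. The skeleton of that pattern, reduced to illustrative stubs:

    #include <cstdint>
    typedef uintptr_t uptr;
    struct Sync { uptr addr; };

    // Single private workhorse; 'create' selects between the two behaviors.
    static Sync *GetAndLock(void *thr, uptr pc, uptr addr,
                            bool write_lock, bool create) {
      (void)thr; (void)pc; (void)write_lock; (void)create;
      return 0;  // stub: the real table lookup/creation is elided
    }

    // Two public entry points keep the create/no-create decision out of
    // every call site and make the caller's intent explicit:
    Sync *GetOrCreateAndLock(void *thr, uptr pc, uptr addr, bool write_lock) {
      return GetAndLock(thr, pc, addr, write_lock, /*create=*/true);
    }
    Sync *GetIfExistsAndLock(uptr addr, bool write_lock) {
      return GetAndLock(0, 0, addr, write_lock, /*create=*/false);
    }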
@@ -57,10 +87,9 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
         break;
     }
     if (res == 0) {
-      StatInc(thr, StatSyncCreated);
-      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
-      res = new(mem) SyncVar(addr);
-      res->creation_stack.ObtainCurrent(thr, pc);
+      if (!create)
+        return 0;
+      res = Create(thr, pc, addr);
       res->next = b->head;
       b->head = res;
     }

@@ -85,6 +114,8 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
       }
     }
   }
+  if (!create)
+    return 0;
   {
     Lock l(&p->mtx);
     SyncVar *res = p->val;

@@ -93,12 +124,7 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
         break;
     }
     if (res == 0) {
-      StatInc(thr, StatSyncCreated);
-      void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
-      res = new(mem) SyncVar(addr);
-#ifndef TSAN_GO
-      res->creation_stack.ObtainCurrent(thr, pc);
-#endif
+      res = Create(thr, pc, addr);
       res->next = p->val;
       p->val = res;
     }

@@ -112,6 +138,11 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,

 SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
 #ifndef TSAN_GO
+  { // NOLINT
+    SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
+    if (res)
+      return res;
+  }
   if (PrimaryAllocator::PointerIsMine((void*)addr)) {
     MBlock *b = user_mblock(thr, (void*)addr);
     SyncVar *res = 0;
@@ -48,12 +48,13 @@ class StackTrace {
 };

 struct SyncVar {
-  explicit SyncVar(uptr addr);
+  explicit SyncVar(uptr addr, u64 uid);

   static const int kInvalidTid = -1;

   Mutex mtx;
-  const uptr addr;
+  uptr addr;
+  const u64 uid;  // Globally unique id.
   SyncClock clock;
   SyncClock read_clock;  // Used for rw mutexes only.
   StackTrace creation_stack;

@@ -67,6 +68,18 @@ struct SyncVar {
   SyncVar *next;  // In SyncTab hashtable.

   uptr GetMemoryConsumption();
+  u64 GetId() const {
+    // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
+    return GetLsb((u64)addr | (uid << 47), 61);
+  }
+  bool CheckId(u64 uid) const {
+    CHECK_EQ(uid, GetLsb(uid, 14));
+    return GetLsb(this->uid, 14) == uid;
+  }
+  static uptr SplitId(u64 id, u64 *uid) {
+    *uid = id >> 47;
+    return (uptr)GetLsb(id, 47);
+  }
 };

 class SyncTab {
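GetId/CheckId/SplitId define the packing used throughout this patch (TraceAddEvent payloads, MutexSet entries, report-time lookups): 47 low bits of address, 14 bits of uid, and 3 spare top bits, 61 bits in total, exactly the event payload width checked in TraceAddEvent. A worked round-trip, assuming only the layout stated in the comment:

    #include <cassert>
    #include <cstdint>

    typedef uint64_t u64;
    typedef uintptr_t uptr;

    static u64 GetLsb(u64 v, int bits) { return v & (((u64)1 << bits) - 1); }

    u64 MakeId(uptr addr, u64 uid) {
      // low 47 bits: address; next 14 bits: low part of uid; top 3 bits:
      // zero, leaving room for the 3-bit event type when the id is traced.
      return GetLsb((u64)addr | (uid << 47), 61);
    }

    uptr SplitId(u64 id, u64 *uid) {
      *uid = id >> 47;            // the 14 uid bits (id itself is 61 bits)
      return (uptr)GetLsb(id, 47);
    }

    int main() {
      u64 uid = 0;
      u64 id = MakeId(0x7f0012345678, 0x2a5c);
      uptr addr = SplitId(id, &uid);
      assert(addr == 0x7f0012345678);
      assert(uid == (0x2a5c & ((1ull << 14) - 1)));
    }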
@@ -74,13 +87,15 @@ class SyncTab {
   SyncTab();
   ~SyncTab();

-  // If the SyncVar does not exist yet, it is created.
-  SyncVar* GetAndLock(ThreadState *thr, uptr pc,
-                      uptr addr, bool write_lock);
+  SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
+                              uptr addr, bool write_lock);
+  SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);

   // If the SyncVar does not exist, returns 0.
   SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);

+  SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
+
   uptr GetMemoryConsumption(uptr *nsync);

  private:

@@ -94,9 +109,13 @@ class SyncTab {
   // FIXME: Implement something more sane.
   static const int kPartCount = 1009;
   Part tab_[kPartCount];
+  atomic_uint64_t uid_gen_;

   int PartIdx(uptr addr);

+  SyncVar* GetAndLock(ThreadState *thr, uptr pc,
+                      uptr addr, bool write_lock, bool create);
+
   SyncTab(const SyncTab&);  // Not implemented.
   void operator = (const SyncTab&);  // Not implemented.
 };
@@ -14,6 +14,7 @@
 #include "tsan_defs.h"
 #include "tsan_mutex.h"
 #include "tsan_sync.h"
+#include "tsan_mutexset.h"

 namespace __tsan {

@@ -41,6 +42,7 @@ typedef u64 Event;
 struct TraceHeader {
   StackTrace stack0;  // Start stack for the trace.
   u64 epoch0;  // Start epoch for the trace.
+  MutexSet mset0;
 #ifndef TSAN_GO
   uptr stack0buf[kTraceStackSize];
 #endif