tsan: detect accesses to freed memory

http://codereview.appspot.com/6214052

llvm-svn: 156990
This commit is contained in:
Dmitry Vyukov 2012-05-17 14:17:51 +00:00
parent af501a29d3
commit fee5b7d2e0
10 changed files with 106 additions and 39 deletions

View File

@@ -33,4 +33,11 @@ int main() {
return 0;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK: WARNING: ThreadSanitizer: heap-use-after-free
// CHECK: Write of size 4 at {{.*}} by main thread:
// CHECK: #0 Thread2
// CHECK: #1 main
// CHECK: Previous write of size 8 at {{.*}} by thread 1:
// CHECK: #0 free
// CHECK: #1 Thread1

View File

@@ -0,0 +1,26 @@
#include <stdlib.h>
// Frees the buffer. 'noinline' keeps a distinct stack frame so the
// report's "previous write" stack reads free -> foo -> main (see CHECK
// lines below).
void __attribute__((noinline)) foo(int *mem) {
free(mem);
}
// Writes to the (already freed) buffer; 'noinline' keeps 'bar' visible
// as frame #0 of the reported access stack.
void __attribute__((noinline)) bar(int *mem) {
mem[0] = 42;
}
int main() {
int *mem = (int*)malloc(100);
foo(mem);  // frees mem
bar(mem);  // heap-use-after-free: 4-byte write at offset 0
return 0;
}
// The free itself is recorded as an 8-byte "previous write" by the
// runtime, which is why the sizes below differ (4 vs 8).
// CHECK: WARNING: ThreadSanitizer: heap-use-after-free
// CHECK: Write of size 4 at {{.*}} by main thread:
// CHECK: #0 bar
// CHECK: #1 main
// CHECK: Previous write of size 8 at {{.*}} by main thread:
// CHECK: #0 free
// CHECK: #1 foo
// CHECK: #2 main

View File

@@ -58,7 +58,7 @@ namespace __tsan {
// Zero-initializes the vector clock.
ThreadClock::ThreadClock() {
nclk_ = 0;
// NOTE(review): this diff rendering lost its +/- markers — the next line
// is the removed pre-change loop bound (kMaxTid) and the line after it is
// its replacement (kMaxTidInClock, which is kMaxTid * 2 to cover the
// extra 'freed'-bit tid range introduced by this commit).
for (uptr i = 0; i < (uptr)kMaxTid; i++)
for (uptr i = 0; i < (uptr)kMaxTidInClock; i++)
clk_[i] = 0;
}

View File

@@ -42,7 +42,7 @@ struct ThreadClock {
ThreadClock();
u64 get(unsigned tid) const {
DCHECK(tid < kMaxTid);
DCHECK(tid < kMaxTidInClock);
return clk_[tid];
}
@@ -71,7 +71,7 @@ struct ThreadClock {
private:
uptr nclk_;
u64 clk_[kMaxTid];
u64 clk_[kMaxTidInClock];
};
} // namespace __tsan

View File

@@ -29,8 +29,9 @@ typedef signed long long s64; // NOLINT
typedef unsigned long uptr; // NOLINT
const uptr kPageSize = 4096;
const int kTidBits = 16;
const int kTidBits = 15;
const unsigned kMaxTid = 1 << kTidBits;
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
const int kClkBits = 40;
#ifdef TSAN_SHADOW_COUNT

View File

@@ -32,6 +32,8 @@ static void PrintHeader(ReportType typ) {
if (typ == ReportTypeRace)
Printf("data race");
else if (typ == ReportTypeUseAfterFree)
Printf("heap-use-after-free");
else if (typ == ReportTypeThreadLeak)
Printf("thread leak");
else if (typ == ReportTypeMutexDestroyLocked)

View File

@@ -20,6 +20,7 @@ namespace __tsan {
enum ReportType {
ReportTypeRace,
ReportTypeUseAfterFree,
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
ReportTypeSignalUnsafe,

View File

@@ -373,7 +373,11 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
// Marks [addr, addr+size) as freed in shadow memory.
// The free is first processed as an ordinary write (so racing accesses are
// reported), then every shadow cell is overwritten with the freeing
// thread's tid/epoch plus the 'freed' bit, so that any later access to the
// range is diagnosed as heap-use-after-free and the free's stack can be
// restored from the stored tid/epoch.
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
MemoryAccessRange(thr, pc, addr, size, true);
// NOTE(review): diff markers were lost in this rendering — the next line
// is the removed pre-change code (constant kShadowFreed); the lines that
// follow are its replacement.
MemoryRangeSet(thr, pc, addr, size, kShadowFreed);
Shadow s(thr->fast_state);
s.MarkAsFreed();
s.SetWrite(true);
// Model the free as an 8-byte write at offset 0 (size log 3) — this is
// the "Previous write of size 8" shown in the use-after-free reports.
s.SetAddr0AndSizeLog(0, 3);
MemoryRangeSet(thr, pc, addr, size, s.raw());
}
void FuncEntry(ThreadState *thr, uptr pc) {
@@ -389,16 +393,6 @@ void FuncEntry(ThreadState *thr, uptr pc) {
DCHECK(thr->shadow_stack_pos < &thr->shadow_stack[kShadowStackSize]);
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
#if 1
// While we are testing on single-threaded benchmarks,
// emulate some synchronization activity.
// FIXME: remove me later.
if (((++thr->func_call_count) % 1000) == 0) {
thr->clock.set(thr->fast_state.tid(), thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
}
#endif
}
void FuncExit(ThreadState *thr) {

View File

@@ -45,17 +45,18 @@ inline void NOINLINE breakhere() {
}
// FastState (from most significant bit):
// unused : 1
// tid : kTidBits
// epoch : kClkBits
// unused :
// unused : -
// ignore_bit : 1
class FastState {
public:
FastState(u64 tid, u64 epoch) {
x_ = tid << (64 - kTidBits);
x_ |= epoch << (64 - kTidBits - kClkBits);
CHECK(tid == this->tid());
CHECK(epoch == this->epoch());
x_ = tid << kTidShift;
x_ |= epoch << kClkShift;
DCHECK(tid == this->tid());
DCHECK(epoch == this->epoch());
}
explicit FastState(u64 x)
@@ -63,28 +64,37 @@ class FastState {
}
u64 tid() const {
u64 res = x_ >> (64 - kTidBits);
u64 res = x_ >> kTidShift;
return res;
}
u64 epoch() const {
u64 res = (x_ << kTidBits) >> (64 - kClkBits);
u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
return res;
};
void IncrementEpoch() {
// u64 old_epoch = epoch();
x_ += 1 << (64 - kTidBits - kClkBits);
// CHECK(old_epoch + 1 == epoch());
}
void SetIgnoreBit() { x_ |= 1; }
void ClearIgnoreBit() { x_ &= ~(u64)1; }
bool GetIgnoreBit() { return x_ & 1; }
void IncrementEpoch() {
u64 old_epoch = epoch();
x_ += 1 << kClkShift;
DCHECK(old_epoch + 1 == epoch());
(void)old_epoch;
}
void SetIgnoreBit() { x_ |= kIgnoreBit; }
void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
bool GetIgnoreBit() { return x_ & kIgnoreBit; }
private:
friend class Shadow;
static const int kTidShift = 64 - kTidBits - 1;
static const int kClkShift = kTidShift - kClkBits;
static const u64 kIgnoreBit = 1ull;
static const u64 kFreedBit = 1ull << 63;
u64 x_;
};
// Shadow (from most significant bit):
// freed : 1
// tid : kTidBits
// epoch : kClkBits
// is_write : 1
@@ -116,7 +126,7 @@ class Shadow: public FastState {
u64 raw() const { return x_; }
static inline bool TidsAreEqual(Shadow s1, Shadow s2) {
u64 shifted_xor = (s1.x_ ^ s2.x_) >> (64 - kTidBits);
u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
return shifted_xor == 0;
}
@@ -170,6 +180,25 @@ class Shadow: public FastState {
u64 size() const { return 1ull << size_log(); }
bool is_write() const { return x_ & 32; }
// The idea behind the freed bit is as follows.
// When the memory is freed (or otherwise unaccessible) we write to the shadow
// values with tid/epoch related to the free and the freed bit set.
// During memory accesses processing the freed bit is considered
// as msb of tid. So any access races with shadow with freed bit set
// (it is as if write from a thread with which we never synchronized before).
// This allows us to detect accesses to freed memory w/o additional
// overheads in memory access processing and at the same time restore
// tid/epoch of free.
// Sets the msb 'freed' bit so this shadow value represents a free()
// rather than an ordinary memory access (see the explanatory comment
// above on how the freed bit participates in race detection).
void MarkAsFreed() {
x_ |= kFreedBit;
}
// Returns whether the freed bit was set, and clears it so the value can
// be used afterwards as a plain tid/epoch shadow (e.g. when restoring
// the free's stack trace during report generation).
bool GetFreedAndReset() {
bool res = x_ & kFreedBit;
x_ &= ~kFreedBit;
return res;
}
private:
u64 size_log() const { return (x_ >> 3) & 3; }
};

View File

@@ -286,6 +286,14 @@ bool OutputReport(const ScopedReport &srep, const ReportStack *suppress_stack) {
void ReportRace(ThreadState *thr) {
ScopedInRtl in_rtl;
bool freed = false;
{
Shadow s(thr->racy_state[1]);
freed = s.GetFreedAndReset();
thr->racy_state[1] = s.raw();
}
uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
uptr addr_min = 0;
uptr addr_max = 0;
@@ -303,11 +311,10 @@ void ReportRace(ThreadState *thr) {
Context *ctx = CTX();
Lock l0(&ctx->thread_mtx);
ScopedReport rep(ReportTypeRace);
const uptr nmop = thr->racy_state[1] == kShadowFreed ? 1 : 2;
StackTrace traces[2];
for (uptr i = 0; i < nmop; i++) {
ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
const uptr kMop = 2;
StackTrace traces[kMop];
for (uptr i = 0; i < kMop; i++) {
Shadow s(thr->racy_state[i]);
RestoreStack(s.tid(), s.epoch(), &traces[i]);
}
@@ -315,7 +322,7 @@ void ReportRace(ThreadState *thr) {
if (HandleRacyStacks(thr, traces, addr_min, addr_max))
return;
for (uptr i = 0; i < nmop; i++) {
for (uptr i = 0; i < kMop; i++) {
Shadow s(thr->racy_state[i]);
rep.AddMemoryAccess(addr, s, &traces[i]);
}
@@ -323,7 +330,7 @@ void ReportRace(ThreadState *thr) {
// Ensure that we have at least something for the current thread.
CHECK_EQ(traces[0].IsEmpty(), false);
for (uptr i = 0; i < nmop; i++) {
for (uptr i = 0; i < kMop; i++) {
FastState s(thr->racy_state[i]);
ThreadContext *tctx = ctx->threads[s.tid()];
if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)