//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "sanitizer_stack_store.h"

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

namespace {
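// Packs the frame count and the tag of a stack trace into a single uptr,
// which is stored as the first frame slot of each trace. The count is capped
// at (1 << kStackSizeBits) - 1 frames.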
struct StackTraceHeader {
  static constexpr u32 kStackSizeBits = 8;

  u8 size;
  u8 tag;
  explicit StackTraceHeader(const StackTrace &trace)
      : size(Min<uptr>(trace.size, (1u << 8) - 1)), tag(trace.tag) {
    CHECK_EQ(trace.tag, static_cast<uptr>(tag));
  }
  explicit StackTraceHeader(uptr h)
      : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}

  uptr ToUptr() const {
    return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);
  }
};
} // namespace
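
// Stores the trace as a StackTraceHeader frame followed by the trace frames
// and returns an Id accepted by Load(), or 0 for an empty trace. *pack is set
// to the number of blocks that became completely filled as a result of this
// call.
//
// Illustrative usage (a sketch only, with hypothetical `store` and `trace`
// objects; real callers live outside this file):
//   uptr pack = 0;
//   StackStore::Id id = store.Store(trace, &pack);
//   StackTrace copy = store.Load(id);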
StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
  if (!trace.size && !trace.tag)
    return 0;
  StackTraceHeader h(trace);
  uptr idx = 0;
  *pack = 0;
  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
  *stack_trace = h.ToUptr();
  internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
  return OffsetToId(idx);
}
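
// Reconstructs the stack trace stored under `id`. Returns an empty trace for
// id 0 or if the containing block has not been created yet.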
StackTrace StackStore::Load(Id id) const {
  if (!id)
    return {};
  uptr idx = IdToOffset(id);
  uptr block_idx = GetBlockIdx(idx);
  CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
  const uptr *stack_trace = blocks_[block_idx].Get();
  if (!stack_trace)
    return {};
  stack_trace += GetInBlockIdx(idx);
  StackTraceHeader h(*stack_trace);
  return StackTrace(stack_trace + 1, h.size, h.tag);
}
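
// Approximate memory usage: all frame slots reserved so far, rounded up to
// the page size, plus the object itself.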
uptr StackStore::Allocated() const {
  return RoundUpTo(atomic_load_relaxed(&total_frames_) * sizeof(uptr),
                   GetPageSizeCached()) +
         sizeof(*this);
}
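
// Reserves `count` consecutive frame slots inside a single block and returns
// a pointer to the first one, storing its global frame index in *idx. A range
// that would straddle a block boundary is abandoned: the skipped tail/head
// frames are credited to their blocks via Stored() (reflected in *pack) and
// the allocation is retried.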
uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
  for (;;) {
    // Optimistic lock-free allocation: essentially, try to bump
    // total_frames_.
    uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
    uptr block_idx = GetBlockIdx(start);
    uptr last_idx = GetBlockIdx(start + count - 1);
    if (LIKELY(block_idx == last_idx)) {
      // Fits into a single block.
      CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
      *idx = start;
      return blocks_[block_idx].GetOrCreate() + GetInBlockIdx(start);
    }

    // Retry. We can't use a range allocated in two different blocks.
    CHECK_LE(count, kBlockSizeFrames);
    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
    // Mark the tail/head of these blocks as "stored" to avoid waiting before
    // we can Pack().
    *pack += blocks_[block_idx].Stored(in_first);
    *pack += blocks_[last_idx].Stored(count - in_first);
  }
}

void StackStore::Pack() {
  // TODO
}

void StackStore::TestOnlyUnmap() {
  for (BlockInfo &b : blocks_) b.TestOnlyUnmap();
  internal_memset(this, 0, sizeof(*this));
}
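
// Returns the block's mapped memory, or null if the block has not been
// created yet.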
uptr *StackStore::BlockInfo::Get() const {
  // Idiomatic double-checked locking uses memory_order_acquire here. But
  // relaxed is fine for us; the justification is similar to
  // TwoLevelMap::GetOrCreate.
  return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
}
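
// Maps the block's memory under the mutex. Safe to race with Get() and other
// Create() calls; only one mapping is ever published.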
uptr *StackStore::BlockInfo::Create() {
  SpinMutexLock l(&mtx_);
  uptr *ptr = Get();
  if (!ptr) {
    ptr = reinterpret_cast<uptr *>(
        MmapNoReserveOrDie(kBlockSizeBytes, "StackStore"));
    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
  }
  return ptr;
}
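
// Returns the block's memory, mapping it on first use.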
uptr *StackStore::BlockInfo::GetOrCreate() {
  uptr *ptr = Get();
  if (LIKELY(ptr))
    return ptr;
  return Create();
}

void StackStore::BlockInfo::TestOnlyUnmap() {
  if (uptr *ptr = Get())
    UnmapOrDie(ptr, StackStore::kBlockSizeBytes);
}
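
// Accounts for n more frames stored in this block. Returns true when the
// running total reaches kBlockSizeFrames, i.e. the block is fully filled.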
bool StackStore::BlockInfo::Stored(uptr n) {
  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
         kBlockSizeFrames;
}
} // namespace __sanitizer