//===-- sanitizer_stacktrace.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_STACKTRACE_H
#define SANITIZER_STACKTRACE_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

struct BufferedStackTrace;

static const u32 kStackTraceMax = 255;

#if SANITIZER_LINUX && defined(__mips__)
# define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
#else
# define SANITIZER_CAN_FAST_UNWIND 1
#endif

// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://github.com/google/sanitizers/issues/137
#if SANITIZER_APPLE
# define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
#endif

struct StackTrace {
  const uptr *trace;
  u32 size;
  u32 tag;

  static const int TAG_UNKNOWN = 0;
  static const int TAG_ALLOC = 1;
  static const int TAG_DEALLOC = 2;
  static const int TAG_CUSTOM = 100;  // Tool specific tags start here.

  StackTrace() : trace(nullptr), size(0), tag(0) {}
  StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}
  StackTrace(const uptr *trace, u32 size, u32 tag)
      : trace(trace), size(size), tag(tag) {}

  // Prints a symbolized stacktrace, followed by an empty line.
  void Print() const;

  // Prints a symbolized stacktrace to the output string, followed by an empty
  // line.
  void PrintTo(InternalScopedString *output) const;

  // Prints a symbolized stacktrace to the output buffer, followed by an empty
  // line. Returns the number of symbols that should have been written to the
  // buffer (not including the trailing '\0'). Thus, the string is truncated
  // iff the return value is not less than "out_buf_size".
  uptr PrintTo(char *out_buf, uptr out_buf_size) const;
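
  // Usage sketch (illustrative, not part of the interface; `stack` stands for
  // some already-filled StackTrace): the return value follows the snprintf
  // convention, so truncation can be detected and the buffer regrown.
  //
  //   char buf[128];
  //   uptr needed = stack.PrintTo(buf, sizeof(buf));
  //   if (needed >= sizeof(buf)) {
  //     // Truncated: retry with a buffer of at least needed + 1 bytes.
  //   }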

  static bool WillUseFastUnwind(bool request_fast_unwind) {
    if (!SANITIZER_CAN_FAST_UNWIND)
      return false;
    if (!SANITIZER_CAN_SLOW_UNWIND)
      return true;
    return request_fast_unwind;
  }
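
  // For example, this follows directly from the macros above: on Apple
  // platforms slow unwinding is unavailable, so the request is ignored and
  // fast unwinding is always chosen; on Linux/MIPS and on Windows fast
  // unwinding is unavailable, so this always returns false.
  //
  //   // e.g. on Linux/x86_64, where both unwinders are available:
  //   StackTrace::WillUseFastUnwind(true);   // true
  //   StackTrace::WillUseFastUnwind(false);  // false (slow unwind is used)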

  static uptr GetCurrentPc();
  static inline uptr GetPreviousInstructionPc(uptr pc);
  static uptr GetNextInstructionPc(uptr pc);
};

// Performance-critical, must be in the header.
ALWAYS_INLINE
uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
#if defined(__arm__)
  // T32 (Thumb) branch instructions might be 16 or 32 bit long,
  // so we return (pc-2) in that case in order to be safe.
  // For A32 mode we return (pc-4) because all instructions are 32 bit long.
  return (pc - 3) & (~1);
#elif defined(__sparc__) || defined(__mips__)
  return pc - 8;
#elif SANITIZER_RISCV64
  // RV-64 has variable instruction length:
  // the C extension gives us 2-byte instructions,
  // the base ISA has 4-byte instructions,
  // and the architecture allows instructions up to 8 bytes.
  // It seems difficult to figure out the exact instruction length,
  // so pc - 2 seems like a safe option for the purposes of stack tracing.
  return pc - 2;
#elif SANITIZER_S390 || SANITIZER_I386 || SANITIZER_X32 || SANITIZER_X64
  return pc - 1;
#else
  return pc - 4;
#endif
}
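
// Illustrative note (an assumption about typical use, not stated in this
// header): unwinders usually record return addresses, which point at the
// instruction *after* the call, so report printers map them back into the
// call instruction before symbolizing; that is what the per-architecture
// offsets above approximate.
//
//   // Hypothetical use by a report printer:
//   uptr ret_addr = GET_CALLER_PC();
//   uptr call_pc = StackTrace::GetPreviousInstructionPc(ret_addr);
//   // call_pc now symbolizes to the call site rather than to the next line.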

// StackTrace that owns the buffer used to store the addresses.
struct BufferedStackTrace : public StackTrace {
  uptr trace_buffer[kStackTraceMax];
  uptr top_frame_bp;  // Optional bp of a top frame.

  BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}

  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);

  // Get the stack trace with the given pc and bp.
  // The pc will be in position 0 of the resulting stack trace.
  // The bp may refer to the current frame or to the caller's frame.
  void Unwind(uptr pc, uptr bp, void *context, bool request_fast,
              u32 max_depth = kStackTraceMax) {
    top_frame_bp = (max_depth > 0) ? bp : 0;
    // Small max_depth optimization.
    if (max_depth <= 1) {
      if (max_depth == 1)
        trace_buffer[0] = pc;
      size = max_depth;
      return;
    }
    UnwindImpl(pc, bp, context, request_fast, max_depth);
  }

  void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
              uptr stack_bottom, bool request_fast_unwind);

  void Reset() {
    *static_cast<StackTrace *>(this) = StackTrace(trace_buffer, 0);
    top_frame_bp = 0;
  }

 private:
  // Every runtime defines its own implementation of this method.
  void UnwindImpl(uptr pc, uptr bp, void *context, bool request_fast,
                  u32 max_depth);

  // UnwindFast/Slow have platform-specific implementations.
  void UnwindFast(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
                  u32 max_depth);
  void UnwindSlow(uptr pc, u32 max_depth);
  void UnwindSlow(uptr pc, void *context, u32 max_depth);

  void PopStackFrames(uptr count);
  uptr LocatePcInTrace(uptr pc);

  BufferedStackTrace(const BufferedStackTrace &) = delete;
  void operator=(const BufferedStackTrace &) = delete;

  friend class FastUnwindTest;
};
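
// A minimal usage sketch (illustrative only, not part of this header) of how
// a runtime might collect and print a trace at the current point.
// GET_CURRENT_FRAME() is the same macro used by GET_CURRENT_PC_BP below.
//
//   BufferedStackTrace stack;
//   stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(),
//                /*context=*/nullptr, /*request_fast=*/true);
//   stack.Print();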

#if defined(__s390x__)
static const uptr kFrameSize = 160;
#elif defined(__s390__)
static const uptr kFrameSize = 96;
#else
static const uptr kFrameSize = 2 * sizeof(uhwptr);
#endif

// Check if the given pointer points into the allocated stack area.
static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
  return frame > stack_bottom && frame < stack_top - kFrameSize;
}
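
// Illustrative sketch (assumptions only; this is not the in-tree fast
// unwinder): a frame-pointer walk would use IsValidFrame to stop before
// stepping outside the thread's stack. The two-word {next frame, return pc}
// layout matches the generic kFrameSize above but is target-specific in
// practice.
//
//   inline void WalkFramesExample(uptr bp, uptr stack_top, uptr stack_bottom) {
//     while (IsValidFrame(bp, stack_top, stack_bottom)) {
//       uhwptr *frame = (uhwptr *)bp;
//       uhwptr retaddr = frame[1];  // saved return address
//       (void)retaddr;              // record/symbolize it here
//       bp = (uptr)frame[0];        // follow the saved frame pointer
//     }
//   }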

}  // namespace __sanitizer

// Use this macro if you want to print a stack trace with the caller
// of the current function in the top frame.
#define GET_CALLER_PC_BP         \
  uptr bp = GET_CURRENT_FRAME(); \
  uptr pc = GET_CALLER_PC();

#define GET_CALLER_PC_BP_SP \
  GET_CALLER_PC_BP;         \
  uptr local_stack;         \
  uptr sp = (uptr)&local_stack
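
// Usage sketch (illustrative; the function name and the `using` are
// assumptions): interceptor-style entry points use GET_CALLER_PC_BP so that
// their *caller* shows up as the top frame of the report.
//
//   using namespace __sanitizer;
//   void ReportFromInterceptorExample() {
//     GET_CALLER_PC_BP;  // defines local `pc` and `bp`
//     BufferedStackTrace stack;
//     stack.Unwind(pc, bp, /*context=*/nullptr, /*request_fast=*/true);
//     stack.Print();
//   }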

// Use this macro if you want to print a stack trace with the current
// function in the top frame.
#define GET_CURRENT_PC_BP        \
  uptr bp = GET_CURRENT_FRAME(); \
  uptr pc = StackTrace::GetCurrentPc()

#define GET_CURRENT_PC_BP_SP \
  GET_CURRENT_PC_BP;         \
  uptr local_stack;          \
  uptr sp = (uptr)&local_stack
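
// Sketch (illustrative): the _SP variants additionally capture an
// approximation of the current stack pointer, for tools whose error reports
// take pc/bp/sp together.
//
//   GET_CURRENT_PC_BP_SP;  // defines `pc`, `bp` and `sp`
//   // Hand pc/bp/sp to the tool's error-reporting routine here.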

// GET_CURRENT_PC() is equivalent to StackTrace::GetCurrentPc().
// The optimized x86 version is faster than GetCurrentPc because
// it does not involve a function call; instead it reads the RIP register.
// Reads of RIP by an instruction return RIP pointing to the next
// instruction, which is exactly what we want here, thus 0 offset.
// It needs to be a macro because otherwise we will get the name
// of this function on the top of most stacks. Attribute artificial
// does not do what it claims to do, unfortunately. And attribute
// __nodebug__ is clang-only. If we had an attribute that removed
// this function from debug info, we could simply make
// StackTrace::GetCurrentPc() faster.
#if defined(__x86_64__)
# define GET_CURRENT_PC()                 \
    (__extension__({                      \
      uptr pc;                            \
      asm("lea 0(%%rip), %0" : "=r"(pc)); \
      pc;                                 \
    }))
#else
# define GET_CURRENT_PC() StackTrace::GetCurrentPc()
#endif
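
// Usage sketch (illustrative only): GET_CURRENT_PC() yields a plain uptr, so
// it can seed even a one-frame trace when full unwinding is unnecessary.
//
//   __sanitizer::uptr here = GET_CURRENT_PC();
//   __sanitizer::StackTrace one_frame(&here, 1);
//   one_frame.Print();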

#endif  // SANITIZER_STACKTRACE_H