[compiler-rt] Replace INLINE with inline

This fixes the clash with BSD headers.

Reviewed By: vitalybuka

Differential Revision: https://reviews.llvm.org/D87562
commit 85e578f53a
parent 85ba2f1663
Author: Kamil Rytarowski
Date:   2020-09-17 16:04:50 +02:00

27 changed files with 123 additions and 128 deletions
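For context on why the macro clashed: compiler-rt defined its own INLINE macro, and BSD system headers (pulled in with _KERNEL defined, as in the NetBSD hunk below) can define INLINE as well. A minimal sketch of the collision; the BSD-side definition here is a hypothetical stand-in, not the exact header contents:

    // sanitizer side (before this commit, see the #ifndef INLINE hunk below):
    #ifndef INLINE
    #define INLINE inline
    #endif

    // BSD kernel header side -- hypothetical stand-in for the real definition:
    #define INLINE static __inline

    // Any file that includes both now redefines the macro, and a declaration
    // such as "INLINE bool Foo();" expands differently depending on include
    // order. Spelling the keyword directly avoids the macro namespace:
    inline bool Foo() { return true; }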


@@ -34,7 +34,7 @@ static uptr last_dlsym_alloc_size_in_words;
 static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
 static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-static INLINE bool IsInDlsymAllocPool(const void *ptr) {
+static inline bool IsInDlsymAllocPool(const void *ptr) {
   uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
   return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
 }
@@ -95,12 +95,12 @@ bool IsFromLocalPool(const void *ptr) {
 }
 #endif
-static INLINE bool MaybeInDlsym() {
+static inline bool MaybeInDlsym() {
   // Fuchsia doesn't use dlsym-based interceptors.
   return !SANITIZER_FUCHSIA && asan_init_is_running;
 }
-static INLINE bool UseLocalPool() {
+static inline bool UseLocalPool() {
   return EarlyMalloc() || MaybeInDlsym();
 }
@@ -304,4 +304,4 @@ void ReplaceSystemMalloc() {
 #endif // SANITIZER_ANDROID
 #endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX ||
-        // SANITIZER_NETBSD || SANITIZER_SOLARIS
+// SANITIZER_NETBSD || SANITIZER_SOLARIS
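The IsInDlsymAllocPool/UseLocalPool machinery above exists because dlsym() itself may allocate before the real allocator is initialized, so early requests are served from a static pool. A minimal sketch of the bump-allocation idea, with illustrative sizes (the real code also handles free/realloc of pool pointers and CHECKs instead of returning null):

    static uptr alloc_memory_for_dlsym[1024];  // the static pool
    static uptr allocated_for_dlsym = 0;       // bump pointer, in words

    static inline void *AllocateFromLocalPool(uptr size_in_bytes) {
      uptr words = (size_in_bytes + sizeof(uptr) - 1) / sizeof(uptr);
      if (allocated_for_dlsym + words > 1024)
        return nullptr;  // pool exhausted; sketch only
      void *mem = &alloc_memory_for_dlsym[allocated_for_dlsym];
      allocated_for_dlsym += words;
      return mem;
    }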


@@ -17,7 +17,7 @@
 #include "sanitizer_common/sanitizer_platform.h"
 #include "asan_internal.h"
-static INLINE bool EarlyMalloc() {
+static inline bool EarlyMalloc() {
   return SANITIZER_RTEMS &&
          (!__asan::asan_inited || __asan::asan_init_is_running);
 }


@@ -411,7 +411,7 @@ static bool IsInvalidPointerPair(uptr a1, uptr a2) {
   return false;
 }
-static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
   switch (flags()->detect_invalid_pointer_pairs) {
     case 0:
       return;


@@ -139,7 +139,7 @@ typedef signed short S2;
 typedef signed int S4;
 typedef signed long long S8;
 #define NOINLINE __attribute__((noinline))
-#define INLINE __attribute__((always_inline))
+#define ALWAYS_INLINE __attribute__((always_inline))
 static bool TrackingOrigins() {
   S8 x;
@@ -4312,7 +4312,7 @@ TEST(MemorySanitizerOrigins, InitializedStoreDoesNotChangeOrigin) {
 } // namespace
 template<class T, class BinaryOp>
-INLINE
+ALWAYS_INLINE
 void BinaryOpOriginTest(BinaryOp op) {
   U4 ox = rand(); //NOLINT
   U4 oy = rand(); //NOLINT
@@ -4345,12 +4345,12 @@ void BinaryOpOriginTest(BinaryOp op) {
   EXPECT_ORIGIN(ox, __msan_get_origin(z));
 }
-template<class T> INLINE T XOR(const T &a, const T&b) { return a ^ b; }
-template<class T> INLINE T ADD(const T &a, const T&b) { return a + b; }
-template<class T> INLINE T SUB(const T &a, const T&b) { return a - b; }
-template<class T> INLINE T MUL(const T &a, const T&b) { return a * b; }
-template<class T> INLINE T AND(const T &a, const T&b) { return a & b; }
-template<class T> INLINE T OR (const T &a, const T&b) { return a | b; }
+template<class T> ALWAYS_INLINE T XOR(const T &a, const T&b) { return a ^ b; }
+template<class T> ALWAYS_INLINE T ADD(const T &a, const T&b) { return a + b; }
+template<class T> ALWAYS_INLINE T SUB(const T &a, const T&b) { return a - b; }
+template<class T> ALWAYS_INLINE T MUL(const T &a, const T&b) { return a * b; }
+template<class T> ALWAYS_INLINE T AND(const T &a, const T&b) { return a & b; }
+template<class T> ALWAYS_INLINE T OR (const T &a, const T&b) { return a | b; }
 TEST(MemorySanitizerOrigins, BinaryOp) {
   if (!TrackingOrigins()) return;
@@ -4704,7 +4704,7 @@ static void TestBZHI() {
       __builtin_ia32_bzhi_di(0xABCDABCDABCDABCD, Poisoned<U8>(1, 0xFFFFFFFF00000000ULL)));
 }
-INLINE U4 bextr_imm(U4 start, U4 len) {
+ALWAYS_INLINE U4 bextr_imm(U4 start, U4 len) {
   start &= 0xFF;
   len &= 0xFF;
   return (len << 8) | start;
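For reference, bextr_imm packs the BEXTR control operand: bits 7:0 hold the start position and bits 15:8 the length, so for example:

    // bextr_imm(4, 8) == (8 << 8) | 4 == 0x0804
    // -> extract 8 bits starting at bit 4 of the source operand.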


@@ -52,14 +52,14 @@ struct NoOpMapUnmapCallback {
 // Callback type for iterating over chunks.
 typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
-INLINE u32 Rand(u32 *state) {  // ANSI C linear congruential PRNG.
+inline u32 Rand(u32 *state) {  // ANSI C linear congruential PRNG.
   return (*state = *state * 1103515245 + 12345) >> 16;
 }
-INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; }  // [0, n)
+inline u32 RandN(u32 *state, u32 n) { return Rand(state) % n; }  // [0, n)
 template<typename T>
-INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
+inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
   if (n <= 1) return;
   u32 state = *rand_state;
   for (u32 i = n - 1; i > 0; i--)
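The RandomShuffle body is cut off above; it presumably completes as a classic Fisher-Yates shuffle built on RandN, along these lines:

    for (u32 i = n - 1; i > 0; i--)
      Swap(a[i], a[RandN(&state, i + 1)]);  // pick j uniformly in [0, i]
    *rand_state = state;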


@@ -27,7 +27,7 @@ namespace __sanitizer {
 void SetErrnoToENOMEM();
 // A common errno setting logic shared by almost all sanitizer allocator APIs.
-INLINE void *SetErrnoOnNull(void *ptr) {
+inline void *SetErrnoOnNull(void *ptr) {
   if (UNLIKELY(!ptr))
     SetErrnoToENOMEM();
   return ptr;
@@ -41,7 +41,7 @@ INLINE void *SetErrnoOnNull(void *ptr) {
 // two and that the size is a multiple of alignment for POSIX implementation,
 // and a bit relaxed requirement for non-POSIX ones, that the size is a multiple
 // of alignment.
-INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
+inline bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
 #if SANITIZER_POSIX
   return alignment != 0 && IsPowerOfTwo(alignment) &&
          (size & (alignment - 1)) == 0;
@@ -52,13 +52,13 @@ INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
 // Checks posix_memalign() parameters, verifies that alignment is a power of two
 // and a multiple of sizeof(void *).
-INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
+inline bool CheckPosixMemalignAlignment(uptr alignment) {
   return alignment != 0 && IsPowerOfTwo(alignment) &&
          (alignment % sizeof(void *)) == 0;
 }
 // Returns true if calloc(size, n) call overflows on size*n calculation.
-INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
+inline bool CheckForCallocOverflow(uptr size, uptr n) {
   if (!size)
     return false;
   uptr max = (uptr)-1L;
@@ -67,7 +67,7 @@ INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
 // Returns true if the size passed to pvalloc overflows when rounded to the next
 // multiple of page_size.
-INLINE bool CheckForPvallocOverflow(uptr size, uptr page_size) {
+inline bool CheckForPvallocOverflow(uptr size, uptr page_size) {
   return RoundUpTo(size, page_size) < size;
 }
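The CheckForCallocOverflow body is truncated above; the check presumably finishes by comparing n against the largest multiplier that still fits:

    inline bool CheckForCallocOverflow(uptr size, uptr n) {
      if (!size)
        return false;
      uptr max = (uptr)-1L;
      return (max / size) < n;  // size * n would wrap iff n > max / size
    }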


@@ -18,8 +18,8 @@
 // (currently, 32 bits and internal allocator).
 class LargeMmapAllocatorPtrArrayStatic {
  public:
-  INLINE void *Init() { return &p_[0]; }
-  INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
+  inline void *Init() { return &p_[0]; }
+  inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
  private:
   static const int kMaxNumChunks = 1 << 15;
   uptr p_[kMaxNumChunks];
@@ -31,14 +31,14 @@ class LargeMmapAllocatorPtrArrayStatic {
 // same functionality in Fuchsia case, which does not support MAP_NORESERVE.
 class LargeMmapAllocatorPtrArrayDynamic {
  public:
-  INLINE void *Init() {
+  inline void *Init() {
     uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
                                  SecondaryAllocatorName);
     CHECK(p);
     return reinterpret_cast<void*>(p);
   }
-  INLINE void EnsureSpace(uptr n) {
+  inline void EnsureSpace(uptr n) {
     CHECK_LT(n, kMaxNumChunks);
     DCHECK(n <= n_reserved_);
     if (UNLIKELY(n == n_reserved_)) {


@@ -72,12 +72,12 @@ namespace __sanitizer {
 // Clutter-reducing helpers.
 template<typename T>
-INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
+inline typename T::Type atomic_load_relaxed(const volatile T *a) {
   return atomic_load(a, memory_order_relaxed);
 }
 template<typename T>
-INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
+inline void atomic_store_relaxed(volatile T *a, typename T::Type v) {
   atomic_store(a, v, memory_order_relaxed);
 }


@@ -34,16 +34,16 @@ namespace __sanitizer {
 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
 // for mappings of the memory model to different processors.
-INLINE void atomic_signal_fence(memory_order) {
+inline void atomic_signal_fence(memory_order) {
   __asm__ __volatile__("" ::: "memory");
 }
-INLINE void atomic_thread_fence(memory_order) {
+inline void atomic_thread_fence(memory_order) {
   __sync_synchronize();
 }
 template<typename T>
-INLINE typename T::Type atomic_fetch_add(volatile T *a,
+inline typename T::Type atomic_fetch_add(volatile T *a,
     typename T::Type v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
@@ -51,7 +51,7 @@ INLINE typename T::Type atomic_fetch_add(volatile T *a,
 }
 template<typename T>
-INLINE typename T::Type atomic_fetch_sub(volatile T *a,
+inline typename T::Type atomic_fetch_sub(volatile T *a,
     typename T::Type v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
@@ -59,7 +59,7 @@ INLINE typename T::Type atomic_fetch_sub(volatile T *a,
 }
 template<typename T>
-INLINE typename T::Type atomic_exchange(volatile T *a,
+inline typename T::Type atomic_exchange(volatile T *a,
     typename T::Type v, memory_order mo) {
   DCHECK(!((uptr)a % sizeof(*a)));
   if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
@@ -71,7 +71,7 @@ INLINE typename T::Type atomic_exchange(volatile T *a,
 }
 template <typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
+inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                            typename T::Type xchg,
                                            memory_order mo) {
   typedef typename T::Type Type;
@@ -84,7 +84,7 @@ INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
 }
 template<typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *a,
+inline bool atomic_compare_exchange_weak(volatile T *a,
                                          typename T::Type *cmp,
                                          typename T::Type xchg,
                                          memory_order mo) {
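As the truncated bodies suggest (and as the standard compare-exchange contract requires), these helpers write the observed value back through *cmp on failure, which enables the usual retry idiom. A sketch, where `counter` and `step` are illustrative:

    atomic_uintptr_t counter;
    uptr step = 1;
    uptr cur = atomic_load(&counter, memory_order_relaxed);
    while (!atomic_compare_exchange_weak(&counter, &cur, cur + step,
                                         memory_order_acq_rel)) {
      // cur now holds the value another thread installed; just retry.
    }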


@@ -37,7 +37,7 @@ static struct {
 } __attribute__((aligned(32))) lock = {0, {0}};
 template <>
-INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
+inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
                                               atomic_uint64_t::Type val,
                                               memory_order mo) {
   DCHECK(mo &
@@ -55,14 +55,14 @@ INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
 }
 template <>
-INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
+inline atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
                                               atomic_uint64_t::Type val,
                                               memory_order mo) {
   return atomic_fetch_add(ptr, -val, mo);
 }
 template <>
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
                                            atomic_uint64_t::Type *cmp,
                                            atomic_uint64_t::Type xchg,
                                            memory_order mo) {
@@ -87,7 +87,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
 }
 template <>
-INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
+inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
                                          memory_order mo) {
   DCHECK(mo &
          (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
@@ -100,7 +100,7 @@ INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
 }
 template <>
-INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
+inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
                          memory_order mo) {
   DCHECK(mo &
          (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));


@@ -17,12 +17,12 @@
 namespace __sanitizer {
-INLINE void proc_yield(int cnt) {
+inline void proc_yield(int cnt) {
   __asm__ __volatile__("" ::: "memory");
 }
 template<typename T>
-INLINE typename T::Type atomic_load(
+inline typename T::Type atomic_load(
     const volatile T *a, memory_order mo) {
   DCHECK(mo & (memory_order_relaxed | memory_order_consume
       | memory_order_acquire | memory_order_seq_cst));
@@ -60,7 +60,7 @@ INLINE typename T::Type atomic_load(
 }
 template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
   DCHECK(mo & (memory_order_relaxed | memory_order_release
       | memory_order_seq_cst));
   DCHECK(!((uptr)a % sizeof(*a)));


@@ -16,7 +16,7 @@
 namespace __sanitizer {
-INLINE void proc_yield(int cnt) {
+inline void proc_yield(int cnt) {
   __asm__ __volatile__("" ::: "memory");
   for (int i = 0; i < cnt; i++)
     __asm__ __volatile__("pause");
@@ -24,7 +24,7 @@ INLINE void proc_yield(int cnt) {
 }
 template<typename T>
-INLINE typename T::Type atomic_load(
+inline typename T::Type atomic_load(
     const volatile T *a, memory_order mo) {
   DCHECK(mo & (memory_order_relaxed | memory_order_consume
       | memory_order_acquire | memory_order_seq_cst));
@@ -70,7 +70,7 @@ INLINE typename T::Type atomic_load(
 }
 template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
   DCHECK(mo & (memory_order_relaxed | memory_order_release
       | memory_order_seq_cst));
   DCHECK(!((uptr)a % sizeof(*a)));
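proc_yield's pause loop is the standard spin-wait courtesy on x86. Typical call-site shape (a sketch; `flag` is illustrative):

    atomic_uint32_t flag;
    // Spin until another thread clears the flag, easing off the core each round:
    while (atomic_load(&flag, memory_order_acquire))
      proc_yield(10);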


@@ -54,21 +54,21 @@ extern "C" long long _InterlockedExchangeAdd64(long long volatile *Addend,
 namespace __sanitizer {
-INLINE void atomic_signal_fence(memory_order) {
+inline void atomic_signal_fence(memory_order) {
   _ReadWriteBarrier();
 }
-INLINE void atomic_thread_fence(memory_order) {
+inline void atomic_thread_fence(memory_order) {
   _mm_mfence();
 }
-INLINE void proc_yield(int cnt) {
+inline void proc_yield(int cnt) {
   for (int i = 0; i < cnt; i++)
     _mm_pause();
 }
 template<typename T>
-INLINE typename T::Type atomic_load(
+inline typename T::Type atomic_load(
     const volatile T *a, memory_order mo) {
   DCHECK(mo & (memory_order_relaxed | memory_order_consume
       | memory_order_acquire | memory_order_seq_cst));
@@ -86,7 +86,7 @@ INLINE typename T::Type atomic_load(
 }
 template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
   DCHECK(mo & (memory_order_relaxed | memory_order_release
       | memory_order_seq_cst));
   DCHECK(!((uptr)a % sizeof(*a)));
@@ -102,7 +102,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
   atomic_thread_fence(memory_order_seq_cst);
 }
-INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
+inline u32 atomic_fetch_add(volatile atomic_uint32_t *a,
     u32 v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
@@ -110,7 +110,7 @@ INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
     (long)v);
 }
-INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
+inline uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
     uptr v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
@@ -123,7 +123,7 @@ INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
 #endif
 }
-INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
+inline u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
     u32 v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
@@ -131,7 +131,7 @@ INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
     -(long)v);
 }
-INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
+inline uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
     uptr v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
@@ -144,28 +144,28 @@ INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
 #endif
 }
-INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
+inline u8 atomic_exchange(volatile atomic_uint8_t *a,
     u8 v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
   return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
 }
-INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
+inline u16 atomic_exchange(volatile atomic_uint16_t *a,
     u16 v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
   return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
 }
-INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
+inline u32 atomic_exchange(volatile atomic_uint32_t *a,
     u32 v, memory_order mo) {
   (void)mo;
   DCHECK(!((uptr)a % sizeof(*a)));
   return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
 }
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                            u8 *cmp,
                                            u8 xchgv,
                                            memory_order mo) {
@@ -191,7 +191,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
   return false;
 }
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                            uptr *cmp,
                                            uptr xchg,
                                            memory_order mo) {
@@ -204,7 +204,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
   return false;
 }
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
                                            u16 *cmp,
                                            u16 xchg,
                                            memory_order mo) {
@@ -217,7 +217,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
   return false;
 }
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
                                            u32 *cmp,
                                            u32 xchg,
                                            memory_order mo) {
@@ -230,7 +230,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
   return false;
 }
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
                                            u64 *cmp,
                                            u64 xchg,
                                            memory_order mo) {
@@ -244,7 +244,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
 }
 template<typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *a,
+inline bool atomic_compare_exchange_weak(volatile T *a,
                                          typename T::Type *cmp,
                                          typename T::Type xchg,
                                          memory_order mo) {


@@ -53,25 +53,25 @@ const u64 kExternalPCBit = 1ULL << 60;
 extern const char *SanitizerToolName; // Can be changed by the tool.
 extern atomic_uint32_t current_verbosity;
-INLINE void SetVerbosity(int verbosity) {
+inline void SetVerbosity(int verbosity) {
   atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
 }
-INLINE int Verbosity() {
+inline int Verbosity() {
   return atomic_load(&current_verbosity, memory_order_relaxed);
 }
 #if SANITIZER_ANDROID
-INLINE uptr GetPageSize() {
+inline uptr GetPageSize() {
   // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
   return 4096;
 }
-INLINE uptr GetPageSizeCached() {
+inline uptr GetPageSizeCached() {
   return 4096;
 }
 #else
 uptr GetPageSize();
 extern uptr PageSizeCached;
-INLINE uptr GetPageSizeCached() {
+inline uptr GetPageSizeCached() {
   if (!PageSizeCached)
     PageSizeCached = GetPageSize();
   return PageSizeCached;
@@ -91,7 +91,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
 // Memory management
 void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
-INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
+inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
   return MmapOrDie(size, mem_type, /*raw_report*/ true);
 }
 void UnmapOrDie(void *addr, uptr size);
@@ -374,7 +374,7 @@ unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
 }
 #endif
-INLINE uptr MostSignificantSetBitIndex(uptr x) {
+inline uptr MostSignificantSetBitIndex(uptr x) {
   CHECK_NE(x, 0U);
   unsigned long up;
 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
@@ -391,7 +391,7 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
   return up;
 }
-INLINE uptr LeastSignificantSetBitIndex(uptr x) {
+inline uptr LeastSignificantSetBitIndex(uptr x) {
   CHECK_NE(x, 0U);
   unsigned long up;
 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
@@ -408,11 +408,11 @@ INLINE uptr LeastSignificantSetBitIndex(uptr x) {
   return up;
 }
-INLINE bool IsPowerOfTwo(uptr x) {
+inline bool IsPowerOfTwo(uptr x) {
   return (x & (x - 1)) == 0;
 }
-INLINE uptr RoundUpToPowerOfTwo(uptr size) {
+inline uptr RoundUpToPowerOfTwo(uptr size) {
   CHECK(size);
   if (IsPowerOfTwo(size)) return size;
@@ -422,20 +422,20 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
   return 1ULL << (up + 1);
 }
-INLINE uptr RoundUpTo(uptr size, uptr boundary) {
+inline uptr RoundUpTo(uptr size, uptr boundary) {
   RAW_CHECK(IsPowerOfTwo(boundary));
   return (size + boundary - 1) & ~(boundary - 1);
 }
-INLINE uptr RoundDownTo(uptr x, uptr boundary) {
+inline uptr RoundDownTo(uptr x, uptr boundary) {
   return x & ~(boundary - 1);
 }
-INLINE bool IsAligned(uptr a, uptr alignment) {
+inline bool IsAligned(uptr a, uptr alignment) {
   return (a & (alignment - 1)) == 0;
 }
-INLINE uptr Log2(uptr x) {
+inline uptr Log2(uptr x) {
   CHECK(IsPowerOfTwo(x));
   return LeastSignificantSetBitIndex(x);
 }
@@ -451,14 +451,14 @@ template<class T> void Swap(T& a, T& b) {
 }
 // Char handling
-INLINE bool IsSpace(int c) {
+inline bool IsSpace(int c) {
   return (c == ' ') || (c == '\n') || (c == '\t') ||
          (c == '\f') || (c == '\r') || (c == '\v');
 }
-INLINE bool IsDigit(int c) {
+inline bool IsDigit(int c) {
   return (c >= '0') && (c <= '9');
 }
-INLINE int ToLower(int c) {
+inline int ToLower(int c) {
   return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
 }
@@ -840,15 +840,15 @@ void WriteToSyslog(const char *buffer);
 #if SANITIZER_MAC || SANITIZER_WIN_TRACE
 void LogFullErrorReport(const char *buffer);
 #else
-INLINE void LogFullErrorReport(const char *buffer) {}
+inline void LogFullErrorReport(const char *buffer) {}
 #endif
 #if SANITIZER_LINUX || SANITIZER_MAC
 void WriteOneLineToSyslog(const char *s);
 void LogMessageOnPrintf(const char *str);
 #else
-INLINE void WriteOneLineToSyslog(const char *s) {}
-INLINE void LogMessageOnPrintf(const char *str) {}
+inline void WriteOneLineToSyslog(const char *s) {}
+inline void LogMessageOnPrintf(const char *str) {}
 #endif
 #if SANITIZER_LINUX || SANITIZER_WIN_TRACE
@@ -856,21 +856,21 @@ INLINE void LogMessageOnPrintf(const char *str) {}
 void AndroidLogInit();
 void SetAbortMessage(const char *);
 #else
-INLINE void AndroidLogInit() {}
+inline void AndroidLogInit() {}
 // FIXME: MacOS implementation could use CRSetCrashLogMessage.
-INLINE void SetAbortMessage(const char *) {}
+inline void SetAbortMessage(const char *) {}
 #endif
 #if SANITIZER_ANDROID
 void SanitizerInitializeUnwinder();
 AndroidApiLevel AndroidGetApiLevel();
 #else
-INLINE void AndroidLogWrite(const char *buffer_unused) {}
-INLINE void SanitizerInitializeUnwinder() {}
-INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
+inline void AndroidLogWrite(const char *buffer_unused) {}
+inline void SanitizerInitializeUnwinder() {}
+inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
 #endif
-INLINE uptr GetPthreadDestructorIterations() {
+inline uptr GetPthreadDestructorIterations() {
 #if SANITIZER_ANDROID
   return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
 #elif SANITIZER_POSIX
@@ -976,7 +976,7 @@ RunOnDestruction<Fn> at_scope_exit(Fn fn) {
 #if SANITIZER_LINUX && SANITIZER_S390_64
 void AvoidCVE_2016_2143();
 #else
-INLINE void AvoidCVE_2016_2143() {}
+inline void AvoidCVE_2016_2143() {}
 #endif
 struct StackDepotStats {
@@ -997,7 +997,7 @@ bool GetRandom(void *buffer, uptr length, bool blocking = true);
 // Returns the number of logical processors on the system.
 u32 GetNumberOfCPUs();
 extern u32 NumberOfCPUsCached;
-INLINE u32 GetNumberOfCPUsCached() {
+inline u32 GetNumberOfCPUsCached() {
   if (!NumberOfCPUsCached)
     NumberOfCPUsCached = GetNumberOfCPUs();
   return NumberOfCPUsCached;
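Worked examples for the bit tricks above (all boundaries must be powers of two, as RAW_CHECK enforces):

    // IsPowerOfTwo(64)    -> 64 & 63 == 0, true. Note IsPowerOfTwo(0) is also
    //                        "true" by this test, so callers CHECK separately.
    // RoundUpTo(37, 16)   -> (37 + 15) & ~15 == 48
    // RoundDownTo(37, 16) -> 37 & ~15 == 32
    // Log2(32)            -> LeastSignificantSetBitIndex(0b100000) == 5
    static_assert(((37 + 16 - 1) & ~(16 - 1)) == 48, "RoundUpTo");
    static_assert((37 & ~(16 - 1)) == 32, "RoundDownTo");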


@@ -196,9 +196,6 @@ typedef u64 tid_t;
 // This header should NOT include any other headers to avoid portability issues.
 // Common defs.
-#ifndef INLINE
-#define INLINE inline
-#endif
 #define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
 #define SANITIZER_WEAK_DEFAULT_IMPL \
   extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE


@@ -109,7 +109,7 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
 // Releases memory pages entirely within the [beg, end] address range.
 // The pages no longer count toward RSS; reads are guaranteed to return 0.
 // Requires (but does not verify!) that pages are MAP_PRIVATE.
-INLINE void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
+inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
   // man madvise on Linux promises zero-fill for anonymous private pages.
   // Testing shows the same behaviour for private (but not anonymous) mappings
   // of shm_open() files, as long as the underlying file is untouched.
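The body is cut off above; per the comment, the release is presumably a madvise-style call. A simplified standalone sketch of the documented behaviour, not the actual compiler-rt call chain:

    #include <sys/mman.h>

    // Returns pages in [beg, end) to the OS; later reads fault in zero pages.
    // Assumes beg/end are page-aligned and the mapping is anonymous + private.
    static inline void ReleasePagesZeroFill(char *beg, char *end) {
      madvise(beg, end - beg, MADV_DONTNEED);
    }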


@@ -772,7 +772,7 @@ void LogMessageOnPrintf(const char *str) {
 // initialized after the vDSO function pointers, so if it exists, is not null
 // and is not empty, we can use clock_gettime.
 extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
-INLINE bool CanUseVDSO() {
+inline bool CanUseVDSO() {
   // Bionic is safe, it checks for the vDSO function pointers to be initialized.
   if (SANITIZER_ANDROID)
     return true;


@@ -75,7 +75,7 @@ asm(".desc ___crashreporter_info__, 0x10");
 namespace __sanitizer {
 static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
-INLINE void CRAppendCrashLogMessage(const char *msg) {
+inline void CRAppendCrashLogMessage(const char *msg) {
   BlockingMutexLock l(&crashreporter_info_mutex);
   internal_strlcat(__crashreporter_info_buff__, msg,
                    sizeof(__crashreporter_info_buff__)); }


@@ -81,8 +81,6 @@
 #include <sys/shm.h>
 #undef _KERNEL
-#undef INLINE // to avoid clashes with sanitizers' definitions
 #undef IOC_DIRMASK
 // Include these after system headers to avoid name clashes and ambiguities.


@@ -47,14 +47,14 @@ bool ReportFile::SupportsColors() {
   return SupportsColoredOutput(fd);
 }
-static INLINE bool ReportSupportsColors() {
+static inline bool ReportSupportsColors() {
   return report_file.SupportsColors();
 }
 #else  // SANITIZER_FUCHSIA
 // Fuchsia's logs always go through post-processing that handles colorization.
-static INLINE bool ReportSupportsColors() { return true; }
+static inline bool ReportSupportsColors() { return true; }
 #endif  // !SANITIZER_FUCHSIA


@@ -44,7 +44,7 @@ static u32 Cookie;
 // at compilation or at runtime.
 static atomic_uint8_t HashAlgorithm = { CRC32Software };
-INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
+inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
   // If the hardware CRC32 feature is defined here, it was enabled everywhere,
   // as opposed to only for scudo_crc32.cpp. This means that other hardware
   // specific instructions were likely emitted at other places, and as a
@@ -71,31 +71,31 @@ INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
 static BackendT &getBackend();
 namespace Chunk {
-  static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+  static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
     return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                   getHeaderSize());
   }
-  static INLINE
+  static inline
   const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
     return reinterpret_cast<const AtomicPackedHeader *>(
         reinterpret_cast<uptr>(Ptr) - getHeaderSize());
   }
-  static INLINE bool isAligned(const void *Ptr) {
+  static inline bool isAligned(const void *Ptr) {
     return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
   }
   // We can't use the offset member of the chunk itself, as we would double
   // fetch it without any warranty that it wouldn't have been tampered. To
   // prevent this, we work with a local copy of the header.
-  static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
+  static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
        getHeaderSize() - (Header->Offset << MinAlignmentLog));
  }
  // Returns the usable size for a chunk, meaning the amount of bytes from the
  // beginning of the user data to the end of the backend allocated chunk.
-  static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
+  static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
     const uptr ClassId = Header->ClassId;
     if (ClassId)
       return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
@@ -105,7 +105,7 @@ namespace Chunk {
   }
   // Returns the size the user requested when allocating the chunk.
-  static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
+  static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) {
     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
     if (Header->ClassId)
       return SizeOrUnusedBytes;
@@ -114,7 +114,7 @@ namespace Chunk {
   }
   // Compute the checksum of the chunk pointer and its header.
-  static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
+  static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
     UnpackedHeader ZeroChecksumHeader = *Header;
     ZeroChecksumHeader.Checksum = 0;
     uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
@@ -126,7 +126,7 @@ namespace Chunk {
   // Checks the validity of a chunk by verifying its checksum. It doesn't
   // incur termination in the event of an invalid chunk.
-  static INLINE bool isValid(const void *Ptr) {
+  static inline bool isValid(const void *Ptr) {
     PackedHeader NewPackedHeader =
         atomic_load_relaxed(getConstAtomicHeader(Ptr));
     UnpackedHeader NewUnpackedHeader =
@@ -140,7 +140,7 @@ namespace Chunk {
   COMPILER_CHECK(ChunkAvailable == 0);
   // Loads and unpacks the header, verifying the checksum in the process.
-  static INLINE
+  static inline
   void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
     PackedHeader NewPackedHeader =
         atomic_load_relaxed(getConstAtomicHeader(Ptr));
@@ -151,7 +151,7 @@ namespace Chunk {
   }
   // Packs and stores the header, computing the checksum in the process.
-  static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+  static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
     NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
     PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
     atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
@@ -160,7 +160,7 @@ namespace Chunk {
   // Packs and stores the header, computing the checksum in the process. We
   // compare the current header with the expected provided one to ensure that
   // we are not being raced by a corruption occurring in another thread.
-  static INLINE void compareExchangeHeader(void *Ptr,
+  static inline void compareExchangeHeader(void *Ptr,
                                            UnpackedHeader *NewUnpackedHeader,
                                            UnpackedHeader *OldUnpackedHeader) {
     NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
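Taken together, these helpers give scudo a tamper-evident header protocol: load and verify, mutate a local copy, then store with a fresh checksum while checking for races. A usage sketch; the state field and value names are illustrative, not the exact scudo identifiers:

    UnpackedHeader Header;
    Chunk::loadHeader(Ptr, &Header);     // terminates if the checksum is wrong
    UnpackedHeader NewHeader = Header;   // mutate a local copy only
    NewHeader.State = ChunkQuarantine;   // illustrative field/value
    Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);  // race-checked store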


@@ -85,7 +85,7 @@ static const u32 CRC32Table[] = {
   0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
 };
-INLINE u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
+inline u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
   for (uptr i = 0; i < sizeof(Data); i++) {
     Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
     Data >>= 8;
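computeSoftwareCRC32 consumes one byte of Data per iteration through the 256-entry table. The table form is equivalent to the plain bitwise reflected CRC-32 (polynomial 0xEDB88320), shown here as a reference sketch using the u32/u8 typedefs from the excerpt:

    inline u32 crc32Byte(u32 Crc, u8 Byte) {
      Crc ^= Byte;
      for (int k = 0; k < 8; k++)
        Crc = (Crc >> 1) ^ (0xEDB88320 & (0u - (Crc & 1)));
      return Crc;
    }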


@@ -29,7 +29,7 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
   void init();
   void commitBack();
-  INLINE bool tryLock() {
+  inline bool tryLock() {
     if (Mutex.TryLock()) {
       atomic_store_relaxed(&Precedence, 0);
       return true;
@@ -40,14 +40,14 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
     return false;
   }
-  INLINE void lock() {
+  inline void lock() {
     atomic_store_relaxed(&Precedence, 0);
     Mutex.Lock();
  }
-  INLINE void unlock() { Mutex.Unlock(); }
+  inline void unlock() { Mutex.Unlock(); }
-  INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
  private:
  StaticSpinMutex Mutex;


@@ -121,7 +121,7 @@ bool hasHardwareCRC32ARMPosix() { return false; }
 // initialized after the other globals, so we can check its value to know if
 // calling getauxval is safe.
 extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
-INLINE bool areBionicGlobalsInitialized() {
+inline bool areBionicGlobalsInitialized() {
   return !SANITIZER_ANDROID || (&__progname && __progname);
 }


@@ -20,7 +20,7 @@
 namespace __scudo {
 template <class Dest, class Source>
-INLINE Dest bit_cast(const Source& source) {
+inline Dest bit_cast(const Source& source) {
   static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!");
   Dest dest;
   memcpy(&dest, &source, sizeof(dest));


@@ -22,7 +22,7 @@ class ScopedInterceptor {
 LibIgnore *libignore();
 #if !SANITIZER_GO
-INLINE bool in_symbolizer() {
+inline bool in_symbolizer() {
   cur_thread_init();
   return UNLIKELY(cur_thread()->in_symbolizer);
 }


@@ -458,22 +458,22 @@ struct ThreadState {
 ThreadState *cur_thread();
 void set_cur_thread(ThreadState *thr);
 void cur_thread_finalize();
-INLINE void cur_thread_init() { }
+inline void cur_thread_init() { }
 #else
 __attribute__((tls_model("initial-exec")))
 extern THREADLOCAL char cur_thread_placeholder[];
-INLINE ThreadState *cur_thread() {
+inline ThreadState *cur_thread() {
   return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
 }
-INLINE void cur_thread_init() {
+inline void cur_thread_init() {
   ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
   if (UNLIKELY(!thr->current))
     thr->current = thr;
 }
-INLINE void set_cur_thread(ThreadState *thr) {
+inline void set_cur_thread(ThreadState *thr) {
   reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
 }
-INLINE void cur_thread_finalize() { }
+inline void cur_thread_finalize() { }
 #endif  // SANITIZER_MAC || SANITIZER_ANDROID
 #endif  // SANITIZER_GO
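The last hunk's pattern: a raw THREADLOCAL byte buffer doubles as the ThreadState, and its `current` pointer is lazily self-initialized so cur_thread() stays cheap on the hot path while still allowing a different state to be swapped in. A generic sketch of the same idiom (simplified; tsan deliberately avoids C++ thread_local construction):

    struct State { State *current; /* ... payload ... */ };
    static thread_local char placeholder[sizeof(State)];  // zero-initialized

    inline State *cur() {
      State *s = reinterpret_cast<State *>(placeholder);
      if (!s->current) s->current = s;  // first use on this thread
      return s->current;                // may later point to a swapped-in State
    }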