[libc] Add support for C++20 'atomic_ref' type (#132302)

Summary:
C++20 introduced std::atomic_ref, an atomic reference type that complements
the standard way of dealing with atomics. Instead of requiring a dedicated
atomic type, it lets you treat an existing allocation as atomic.

This has no users yet, but I'm hoping to use it when I start finalizing
my GPU allocation interface, since that will need to handle atomic values
in-place, which can't be done with placement new on a dedicated atomic
type. Hopefully this is small enough that we can just keep it in-tree
until it's needed, but I'm fine holding it back until it has a user.

I added one extension to allow implicit conversion from a T * and CTAD; see the sketch below.
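
As a rough usage sketch (illustrative only; qualifiers abbreviated to cpp:: for brevity), the type and its extensions behave like this:

  int value = 0;                         // an existing, non-atomic object
  cpp::AtomicRef ref(value);             // CTAD deduces AtomicRef<int>
  ref.fetch_add(1, cpp::MemoryOrder::RELAXED);
  int observed = ref;                    // implicit conversion performs an atomic load
  cpp::AtomicRef<int> from_ptr = &value; // the non-standard implicit T* conversion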

@@ -229,6 +229,154 @@ public:
  LIBC_INLINE void set(T rhs) { val = rhs; }
};

template <typename T> struct AtomicRef {
  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
                    is_move_assignable_v<T>,
                "AtomicRef<T> requires T to be trivially copyable, copy "
                "constructible, move constructible, copy assignable, "
                "and move assignable.");

  static_assert(cpp::has_unique_object_representations_v<T>,
                "AtomicRef<T> only supports types with unique object "
                "representations.");

private:
  T *ptr;

  LIBC_INLINE static int order(MemoryOrder mem_ord) {
    return static_cast<int>(mem_ord);
  }

  LIBC_INLINE static int scope(MemoryScope mem_scope) {
    return static_cast<int>(mem_scope);
  }
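
  // NOTE (context, not part of this diff): MemoryOrder and MemoryScope are
  // enum classes defined earlier in this header, outside the hunk. Roughly,
  // they wrap the compiler's builtin constants, e.g.:
  //
  //   enum class MemoryOrder : int {
  //     RELAXED = __ATOMIC_RELAXED, CONSUME = __ATOMIC_CONSUME,
  //     ACQUIRE = __ATOMIC_ACQUIRE, RELEASE = __ATOMIC_RELEASE,
  //     ACQ_REL = __ATOMIC_ACQ_REL, SEQ_CST = __ATOMIC_SEQ_CST,
  //   };
  //
  // so order() and scope() simply pass the underlying integers through to
  // the __atomic_* and __scoped_atomic_* builtins.
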
public:
  // Constructor from T reference
  LIBC_INLINE explicit constexpr AtomicRef(T &obj) : ptr(&obj) {}

  // Non-standard implicit conversion from T*
  LIBC_INLINE constexpr AtomicRef(T *obj) : ptr(obj) {}

  LIBC_INLINE AtomicRef(const AtomicRef &) = default;
  LIBC_INLINE AtomicRef &operator=(const AtomicRef &) = default;

  // Atomic load
  LIBC_INLINE operator T() const { return load(); }

  LIBC_INLINE T
  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    T res;
#if __has_builtin(__scoped_atomic_load)
    __scoped_atomic_load(ptr, &res, order(mem_ord), scope(mem_scope));
#else
    __atomic_load(ptr, &res, order(mem_ord));
#endif
    return res;
  }

  // Atomic store
  LIBC_INLINE T operator=(T rhs) const {
    store(rhs);
    return rhs;
  }

  LIBC_INLINE void
  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
#if __has_builtin(__scoped_atomic_store)
    __scoped_atomic_store(ptr, &rhs, order(mem_ord), scope(mem_scope));
#else
    __atomic_store(ptr, &rhs, order(mem_ord));
#endif
  }
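
  // Usage sketch (illustrative, not part of this patch): a RELEASE store
  // paired with an ACQUIRE load forms the usual message-passing pattern:
  //
  //   int data = 0, flag_mem = 0;
  //   cpp::AtomicRef<int> flag(flag_mem);
  //   data = 42;                           // Producer writes the payload,
  //   flag.store(1, MemoryOrder::RELEASE); // then publishes the flag.
  //   // Consumer, on another thread:
  //   if (flag.load(MemoryOrder::ACQUIRE) == 1) { /* data is visible here */ }
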
  // Atomic compare exchange (strong)
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    return __atomic_compare_exchange(ptr, &expected, &desired, false,
                                     order(mem_ord), order(mem_ord));
  }

  // Atomic compare exchange (strong, separate success/failure memory orders)
  LIBC_INLINE bool compare_exchange_strong(
      T &expected, T desired, MemoryOrder success_order,
      MemoryOrder failure_order,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    return __atomic_compare_exchange(ptr, &expected, &desired, false,
                                     order(success_order),
                                     order(failure_order));
  }
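
  // Usage sketch (illustrative): on failure, `expected` is updated with the
  // value observed in memory, which enables the usual retry loop, e.g. an
  // atomic fetch-max:
  //
  //   int expected = ref.load(MemoryOrder::RELAXED);
  //   while (expected < new_val &&
  //          !ref.compare_exchange_strong(expected, new_val,
  //                                       MemoryOrder::ACQ_REL,
  //                                       MemoryOrder::RELAXED))
  //     ; // Retry until we succeed or `expected` is already >= new_val.
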
  // Atomic exchange
  LIBC_INLINE T
  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    T ret;
#if __has_builtin(__scoped_atomic_exchange)
    __scoped_atomic_exchange(ptr, &desired, &ret, order(mem_ord),
                             scope(mem_scope));
#else
    __atomic_exchange(ptr, &desired, &ret, order(mem_ord));
#endif
    return ret;
  }

  LIBC_INLINE T fetch_add(
      T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_add)
    return __scoped_atomic_fetch_add(ptr, increment, order(mem_ord),
                                     scope(mem_scope));
#else
    return __atomic_fetch_add(ptr, increment, order(mem_ord));
#endif
  }

  LIBC_INLINE T
  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_or)
    return __scoped_atomic_fetch_or(ptr, mask, order(mem_ord),
                                    scope(mem_scope));
#else
    return __atomic_fetch_or(ptr, mask, order(mem_ord));
#endif
  }

  LIBC_INLINE T fetch_and(
      T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_and)
    return __scoped_atomic_fetch_and(ptr, mask, order(mem_ord),
                                     scope(mem_scope));
#else
    return __atomic_fetch_and(ptr, mask, order(mem_ord));
#endif
  }

  LIBC_INLINE T fetch_sub(
      T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
#if __has_builtin(__scoped_atomic_fetch_sub)
    return __scoped_atomic_fetch_sub(ptr, decrement, order(mem_ord),
                                     scope(mem_scope));
#else
    return __atomic_fetch_sub(ptr, decrement, order(mem_ord));
#endif
  }
};

// Permit CTAD when generating an atomic reference.
template <typename T> AtomicRef(T &) -> AtomicRef<T>;
// Issue a thread fence with the given memory ordering.
LIBC_INLINE void atomic_thread_fence(
    MemoryOrder mem_ord,

@@ -50,3 +50,18 @@ TEST(LlvmLibcAtomicTest, TrivialCompositeData) {
  ASSERT_EQ(old.a, 'a');
  ASSERT_EQ(old.b, 'b');
}

TEST(LlvmLibcAtomicTest, AtomicRefTest) {
  int val = 123;
  LIBC_NAMESPACE::cpp::AtomicRef aint(val);
  ASSERT_EQ(aint.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED), 123);
  ASSERT_EQ(aint.fetch_add(1, LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED), 123);
  aint = 1234;
  ASSERT_EQ(aint.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED), 1234);

  // Test the implicit construction from pointer.
  auto fn = [](LIBC_NAMESPACE::cpp::AtomicRef<int> aint) -> int {
    return aint.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED);
  };
  ASSERT_EQ(fn(&val), 1234);
}
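
// A hypothetical extra check (a sketch for illustration, not part of this
// commit) exercising AtomicRef on a trivially copyable composite type, which
// the static_asserts in the class allow:
TEST(LlvmLibcAtomicTest, AtomicRefCompositeSketch) {
  struct Pair {
    char a, b;
  };
  Pair p{'a', 'b'};
  LIBC_NAMESPACE::cpp::AtomicRef pref(p); // CTAD deduces AtomicRef<Pair>.

  // exchange atomically stores the new value and returns the previous one.
  Pair old =
      pref.exchange({'x', 'y'}, LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED);
  ASSERT_EQ(old.a, 'a');
  ASSERT_EQ(old.b, 'b');
}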