2023-03-10 16:48:53 -06:00
|
|
|
//===-- Shared memory RPC client / server interface -------------*- C++ -*-===//
|
|
|
|
//
|
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2023-03-20 09:29:43 -05:00
|
|
|
//
|
|
|
|
// This file implements a remote procedure call mechanism to communicate between
|
|
|
|
// heterogeneous devices that can share an address space atomically. We provide
|
|
|
|
// a client and a server to facilitate the remote call. The client makes request
|
|
|
|
// to the server using a shared communication channel. We use separate atomic
|
|
|
|
// signals to indicate which side, the client or the server is in ownership of
|
|
|
|
// the buffer.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2023-03-10 16:48:53 -06:00
|
|
|
|
|
|
|
#ifndef LLVM_LIBC_SRC_SUPPORT_RPC_RPC_H
|
|
|
|
#define LLVM_LIBC_SRC_SUPPORT_RPC_RPC_H
|
|
|
|
|
2023-03-30 09:50:56 -05:00
|
|
|
#include "rpc_util.h"
|
2023-03-10 16:48:53 -06:00
|
|
|
#include "src/__support/CPP/atomic.h"
|
2023-05-04 14:53:28 -05:00
|
|
|
#include "src/__support/CPP/functional.h"
|
2023-04-13 21:27:51 -05:00
|
|
|
#include "src/__support/CPP/optional.h"
|
2023-04-18 09:44:27 -05:00
|
|
|
#include "src/__support/GPU/utils.h"
|
2023-04-13 21:27:51 -05:00
|
|
|
#include "src/string/memory_utils/memcpy_implementations.h"
|
2023-03-10 16:48:53 -06:00
|
|
|
|
|
|
|
#include <stdint.h>
|
|
|
|
|
|
|
|
namespace __llvm_libc {
|
|
|
|
namespace rpc {
|
|
|
|
|
|
|
|
/// A fixed size channel used to communicate between the RPC client and server.
|
2023-05-04 14:53:28 -05:00
|
|
|
struct Buffer {
  // Raw storage for one message; its contents are interpreted according to
  // the opcode recorded in the enclosing packet's header.
  uint64_t data[8];
};
// Both sides of the channel compute offsets from this size, so the 64-byte
// slot layout is part of the shared-memory ABI and must not drift.
static_assert(sizeof(Buffer) == 64, "Buffer size mismatch");
|
2023-03-10 16:48:53 -06:00
|
|
|
|
2023-05-04 14:53:28 -05:00
|
|
|
/// The information associated with a packet. This indicates which operations to
|
|
|
|
/// perform and which threads are active in the slots.
|
|
|
|
struct Header {
  // Bitmask of the lanes in the warp that are participating in this
  // transaction; written by the client when the port is opened.
  uint64_t mask;
  // Identifies which remote operation the server should perform.
  uint16_t opcode;
};
|
|
|
|
|
|
|
|
/// The data payload for the associated packet. We provide enough space for each
|
|
|
|
/// thread in the cooperating lane to have a buffer.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <uint32_t lane_size = gpu::LANE_SIZE> struct Payload {
  // One buffer per lane so each active thread in the cooperating warp has a
  // private slot to read and write.
  Buffer slot[lane_size];
};
|
|
|
|
|
|
|
|
/// A packet used to share data between the client and server across an entire
|
|
|
|
/// lane. We use a lane as the minimum granularity for execution.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <uint32_t lane_size = gpu::LANE_SIZE> struct alignas(64) Packet {
  // Control information shared by the whole lane for this transaction.
  Header header;
  // Per-lane data slots; aligned to 64 so the payload starts on its own
  // cache-line-sized boundary.
  Payload<lane_size> payload;
};
|
|
|
|
|
2023-05-01 12:10:04 -05:00
|
|
|
// TODO: This should be configured by the server and passed in. The general rule
// of thumb is that you should have at least as many ports as possible
// concurrent work items on the GPU to mitigate the lack of forward
// progress guarantees on the GPU.
constexpr uint64_t DEFAULT_PORT_COUNT = 64;
|
2023-05-01 12:10:04 -05:00
|
|
|
|
2023-03-10 16:48:53 -06:00
|
|
|
/// A common process used to synchronize communication between a client and a
|
2023-05-15 12:40:15 -05:00
|
|
|
/// server. The process contains a read-only inbox and a write-only outbox used
|
|
|
|
/// for signaling ownership of the shared buffer between both sides. We assign
|
|
|
|
/// ownership of the buffer to the client if the inbox and outbox bits match,
|
|
|
|
/// otherwise it is owned by the server.
|
2023-04-13 21:27:51 -05:00
|
|
|
///
|
2023-05-15 12:40:15 -05:00
|
|
|
/// This process is designed to allow the client and the server to exchange data
|
|
|
|
/// using a fixed size packet in a mostly arbitrary order using the 'send' and
|
|
|
|
/// 'recv' operations. The following restrictions to this scheme apply:
|
|
|
|
/// - The client will always start with a 'send' operation.
|
|
|
|
/// - The server will always start with a 'recv' operation.
|
|
|
|
/// - Every 'send' or 'recv' call is mirrored by the other process.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <bool Invert, uint32_t lane_size> struct Process {
  LIBC_INLINE Process() = default;
  LIBC_INLINE Process(const Process &) = delete;
  LIBC_INLINE Process &operator=(const Process &) = delete;
  LIBC_INLINE Process(Process &&) = default;
  LIBC_INLINE Process &operator=(Process &&) = default;
  LIBC_INLINE ~Process() = default;

  template <bool T, uint32_t S> friend struct Port;

protected:
  // Number of ports, i.e. the number of slots in each mailbox and in the
  // packet buffer.
  uint64_t port_count;
  // Read-only mailbox; written by the opposite process.
  cpp::Atomic<uint32_t> *inbox;
  // Write-only mailbox; read by the opposite process.
  cpp::Atomic<uint32_t> *outbox;
  // Shared packet storage, one packet per port.
  Packet<lane_size> *packet;

  // Process-local locks, one per port index, manipulated by try_lock/unlock.
  cpp::Atomic<uint32_t> lock[DEFAULT_PORT_COUNT] = {0};

public:
  /// Initialize the communication channels.
  LIBC_INLINE void reset(uint64_t port_count, void *buffer) {
    this->port_count = port_count;
    this->inbox = reinterpret_cast<cpp::Atomic<uint32_t> *>(
        advance(buffer, inbox_offset(port_count)));
    this->outbox = reinterpret_cast<cpp::Atomic<uint32_t> *>(
        advance(buffer, outbox_offset(port_count)));
    this->packet = reinterpret_cast<Packet<lane_size> *>(
        advance(buffer, buffer_offset(port_count)));
  }

  /// Returns the beginning of the unified buffer. Intended for initializing the
  /// client after the server has been started. The lower-addressed mailbox is
  /// the inbox on one side and the outbox on the other, hence the Invert check.
  LIBC_INLINE void *get_buffer_start() const { return Invert ? outbox : inbox; }

  /// Allocate a memory buffer sufficient to store the following equivalent
  /// representation in memory.
  ///
  /// struct Equivalent {
  ///   Atomic<uint32_t> primary[port_count];
  ///   Atomic<uint32_t> secondary[port_count];
  ///   Packet buffer[port_count];
  /// };
  LIBC_INLINE static uint64_t allocation_size(uint64_t port_count) {
    return buffer_offset(port_count) + buffer_bytes(port_count);
  }

protected:
  /// Retrieve the inbox state from memory shared between processes.
  LIBC_INLINE uint32_t load_inbox(uint64_t index) {
    return inbox[index].load(cpp::MemoryOrder::RELAXED);
  }

  /// Retrieve the outbox state from memory shared between processes.
  LIBC_INLINE uint32_t load_outbox(uint64_t index) {
    return outbox[index].load(cpp::MemoryOrder::RELAXED);
  }

  /// Signal to the other process that this one is finished with the buffer.
  /// Equivalent to loading outbox followed by store of the inverted value.
  /// The outbox is write only by this warp and tracking the value locally is
  /// cheaper than calling load_outbox to get the value to store.
  LIBC_INLINE uint32_t invert_outbox(uint64_t index, uint32_t current_outbox) {
    uint32_t inverted_outbox = !current_outbox;
    outbox[index].store(inverted_outbox, cpp::MemoryOrder::RELAXED);
    return inverted_outbox;
  }

  /// Determines if this process needs to wait for ownership of the buffer. We
  /// invert the condition on one of the processes to indicate that if one
  /// process owns the buffer then the other does not.
  LIBC_INLINE static bool buffer_unavailable(uint32_t in, uint32_t out) {
    bool cond = in != out;
    return Invert ? !cond : cond;
  }

  /// Attempt to claim the lock at index. Return true on lock taken.
  /// lane_mask is a bitmap of the threads in the warp that would hold the
  /// single lock on success, e.g. the result of gpu::get_lane_mask()
  /// The lock is held when the zeroth bit of the uint32_t at lock[index]
  /// is set, and available when that bit is clear. Bits [1, 32) are zero.
  /// Or with one is a no-op when the lock is already held.
  [[clang::convergent]] LIBC_INLINE bool try_lock(uint64_t lane_mask,
                                                  uint64_t index) {
    // On amdgpu, test and set to lock[index] and a sync_lane would suffice
    // On volta, need to handle differences between the threads running and
    // the threads that were detected in the previous call to get_lane_mask()
    //
    // All threads in lane_mask try to claim the lock. At most one can succeed.
    // There may be threads active which are not in lane mask which must not
    // succeed in taking the lock, as otherwise it will leak. This is handled
    // by making threads which are not in lane_mask or with 0, a no-op.
    uint32_t id = gpu::get_lane_id();
    bool id_in_lane_mask = lane_mask & (1ul << id);

    // All threads in the warp call fetch_or. Possibly at the same time.
    // Threads outside lane_mask or with 0, which leaves lock[index] unchanged.
    bool before =
        lock[index].fetch_or(id_in_lane_mask, cpp::MemoryOrder::RELAXED);
    uint64_t packed = gpu::ballot(lane_mask, before);

    // If every bit set in lane_mask is also set in packed, every single thread
    // in the warp failed to get the lock. Ballot returns unset for threads not
    // in the lane mask.
    //
    // Cases, per thread:
    // mask==0 -> unspecified before, discarded by ballot -> 0
    // mask==1 and before==0 (success), set zero by ballot -> 0
    // mask==1 and before==1 (failure), set one by ballot -> 1
    //
    // mask != packed implies at least one of the threads got the lock
    // atomic semantics of fetch_or mean at most one of the threads for the lock

    // If holding the lock then the caller can load values knowing said loads
    // won't move past the lock. No such guarantee is needed if the lock acquire
    // failed. This conditional branch is expected to fold in the caller after
    // inlining the current function.
    bool holding_lock = lane_mask != packed;
    if (holding_lock)
      atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
    return holding_lock;
  }

  /// Unlock the lock at index. We need a lane sync to keep this function
  /// convergent, otherwise the compiler will sink the store and deadlock.
  [[clang::convergent]] LIBC_INLINE void unlock(uint64_t lane_mask,
                                                uint64_t index) {
    // Do not move any writes past the unlock.
    atomic_thread_fence(cpp::MemoryOrder::RELEASE);

    // Wait for other threads in the warp to finish using the lock.
    gpu::sync_lane(lane_mask);

    // Use exactly one thread to clear the bit at position 0 in lock[index].
    // Must restrict to a single thread to avoid one thread dropping the lock,
    // then an unrelated warp claiming the lock, then a second thread in this
    // warp dropping the lock again.
    uint32_t and_mask = ~(rpc::is_first_lane(lane_mask) ? 1 : 0);
    lock[index].fetch_and(and_mask, cpp::MemoryOrder::RELAXED);
    gpu::sync_lane(lane_mask);
  }

  /// Invokes a function across every active buffer across the total lane size.
  /// On the GPU each thread handles its own slot; on the CPU a single thread
  /// iterates over every slot flagged active in the packet's header mask.
  LIBC_INLINE void invoke_rpc(cpp::function<void(Buffer *)> fn,
                              Packet<lane_size> &packet) {
    if constexpr (is_process_gpu()) {
      fn(&packet.payload.slot[gpu::get_lane_id()]);
    } else {
      for (uint32_t i = 0; i < lane_size; i += gpu::get_lane_size())
        if (packet.header.mask & 1ul << i)
          fn(&packet.payload.slot[i]);
    }
  }

  /// Alternate version that also provides the index of the current lane.
  LIBC_INLINE void invoke_rpc(cpp::function<void(Buffer *, uint32_t)> fn,
                              Packet<lane_size> &packet) {
    if constexpr (is_process_gpu()) {
      fn(&packet.payload.slot[gpu::get_lane_id()], gpu::get_lane_id());
    } else {
      for (uint32_t i = 0; i < lane_size; i += gpu::get_lane_size())
        if (packet.header.mask & 1ul << i)
          fn(&packet.payload.slot[i], i);
    }
  }

  /// Number of bytes to allocate for an inbox or outbox.
  LIBC_INLINE static uint64_t mailbox_bytes(uint64_t port_count) {
    return port_count * sizeof(cpp::Atomic<uint32_t>);
  }

  /// Number of bytes to allocate for the buffer containing the packets.
  LIBC_INLINE static uint64_t buffer_bytes(uint64_t port_count) {
    return port_count * sizeof(Packet<lane_size>);
  }

  /// Offset of the inbox in memory. This is the same as the outbox if inverted.
  LIBC_INLINE static uint64_t inbox_offset(uint64_t port_count) {
    return Invert ? mailbox_bytes(port_count) : 0;
  }

  /// Offset of the outbox in memory. This is the same as the inbox if inverted.
  LIBC_INLINE static uint64_t outbox_offset(uint64_t port_count) {
    return Invert ? 0 : mailbox_bytes(port_count);
  }

  /// Offset of the buffer containing the packets after the inbox and outbox.
  LIBC_INLINE static uint64_t buffer_offset(uint64_t port_count) {
    return align_up(2 * mailbox_bytes(port_count), alignof(Packet<lane_size>));
  }
};
|
|
|
|
|
|
|
|
/// The port provides the interface to communicate between the multiple
|
|
|
|
/// processes. A port is conceptually an index into the memory provided by the
|
|
|
|
/// underlying process that is guarded by a lock bit.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <bool T, uint32_t S> struct Port {
  LIBC_INLINE Port(Process<T, S> &process, uint64_t lane_mask, uint64_t index,
                   uint32_t out)
      : process(process), lane_mask(lane_mask), index(index), out(out),
        receive(false), owns_buffer(true) {}
  LIBC_INLINE ~Port() = default;

private:
  // Ports are move-only: copying one would duplicate lock ownership. Only the
  // Client/Server open routines and cpp::optional may construct or move them.
  LIBC_INLINE Port(const Port &) = delete;
  LIBC_INLINE Port &operator=(const Port &) = delete;
  LIBC_INLINE Port(Port &&) = default;
  LIBC_INLINE Port &operator=(Port &&) = default;

  friend struct Client;
  template <uint32_t U> friend struct Server;
  friend class cpp::optional<Port<T, S>>;

public:
  template <typename U> LIBC_INLINE void recv(U use);
  template <typename F> LIBC_INLINE void send(F fill);
  template <typename F, typename U>
  LIBC_INLINE void send_and_recv(F fill, U use);
  template <typename W> LIBC_INLINE void recv_and_send(W work);
  LIBC_INLINE void send_n(const void *const *src, uint64_t *size);
  LIBC_INLINE void send_n(const void *src, uint64_t size);
  template <typename A>
  LIBC_INLINE void recv_n(void **dst, uint64_t *size, A &&alloc);

  /// Returns the opcode stored in this port's packet header.
  LIBC_INLINE uint16_t get_opcode() const {
    return process.packet[index].header.opcode;
  }

  LIBC_INLINE void close() {
    // The server is passive, if it owns the buffer when it closes we need to
    // give ownership back to the client.
    if (owns_buffer && T)
      out = process.invert_outbox(index, out);
    process.unlock(lane_mask, index);
  }

private:
  // The process whose mailboxes, locks, and packets this port indexes into.
  Process<T, S> &process;
  // Bitmap of the warp lanes participating in this port's transaction.
  uint64_t lane_mask;
  // The port slot this instance has locked.
  uint64_t index;
  // Locally tracked copy of the outbox value, kept in sync by invert_outbox.
  uint32_t out;
  // True when the previous operation was a recv whose ownership handoff is
  // still pending (see Port::recv).
  bool receive;
  // True while this side owns the shared buffer.
  bool owns_buffer;
};
|
|
|
|
|
|
|
|
/// The RPC client used to make requests to the server.
|
2023-06-19 15:00:04 -05:00
|
|
|
struct Client : public Process<false, gpu::LANE_SIZE> {
  LIBC_INLINE Client() = default;
  LIBC_INLINE Client(const Client &) = delete;
  LIBC_INLINE Client &operator=(const Client &) = delete;
  LIBC_INLINE ~Client() = default;

  using Port = rpc::Port<false, gpu::LANE_SIZE>;
  // Attempt a single scan for an available port; returns nullopt when none
  // could be claimed.
  template <uint16_t opcode> LIBC_INLINE cpp::optional<Port> try_open();
  // Block (spinning) until a port is claimed.
  template <uint16_t opcode> LIBC_INLINE Port open();
};
|
|
|
|
|
|
|
|
/// The RPC server used to respond to the client.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <uint32_t lane_size> struct Server : public Process<true, lane_size> {
  LIBC_INLINE Server() = default;
  LIBC_INLINE Server(const Server &) = delete;
  LIBC_INLINE Server &operator=(const Server &) = delete;
  LIBC_INLINE ~Server() = default;

  using Port = rpc::Port<true, lane_size>;
  // Attempt a single scan for a port with a pending request; returns nullopt
  // when no work is available.
  LIBC_INLINE cpp::optional<Port> try_open();
  // Block (spinning) until a pending request is claimed.
  LIBC_INLINE Port open();
};
|
|
|
|
|
2023-04-13 21:27:51 -05:00
|
|
|
/// Applies \p fill to the shared buffer and initiates a send operation.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <bool T, uint32_t S>
template <typename F>
LIBC_INLINE void Port<T, S>::send(F fill) {
  // If we already own the buffer we can derive the inbox value from the cached
  // outbox (they match on the client, differ on the server — hence the XOR
  // with T); otherwise read it from shared memory.
  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(index);

  // We need to wait until we own the buffer before sending.
  while (Process<T, S>::buffer_unavailable(in, out)) {
    sleep_briefly();
    in = process.load_inbox(index);
  }

  // Apply the \p fill function to initialize the buffer and release the memory.
  // The release fence keeps the buffer writes visible before the outbox flip
  // hands ownership to the other side.
  process.invoke_rpc(fill, process.packet[index]);
  atomic_thread_fence(cpp::MemoryOrder::RELEASE);
  out = process.invert_outbox(index, out);
  owns_buffer = false;
  receive = false;
}
|
|
|
|
|
|
|
|
/// Applies \p use to the shared buffer and acknowledges the send.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <bool T, uint32_t S>
template <typename U>
LIBC_INLINE void Port<T, S>::recv(U use) {
  // We only exchange ownership of the buffer during a receive if we are waiting
  // for a previous receive to finish.
  if (receive) {
    out = process.invert_outbox(index, out);
    owns_buffer = false;
  }

  // Derive the inbox from the cached outbox when we own the buffer, otherwise
  // read it from shared memory (see Port::send for the XOR-with-T encoding).
  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(index);

  // We need to wait until we own the buffer before receiving.
  while (Process<T, S>::buffer_unavailable(in, out)) {
    sleep_briefly();
    in = process.load_inbox(index);
  }
  // The acquire fence orders the buffer reads below after the inbox load that
  // granted us ownership.
  atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);

  // Apply the \p use function to read the memory out of the buffer. Ownership
  // is not returned here; it is deferred to the next operation so a following
  // send can reuse the buffer without an extra round trip.
  process.invoke_rpc(use, process.packet[index]);
  receive = true;
  owns_buffer = true;
}
|
|
|
|
|
|
|
|
/// Combines a send and receive into a single function.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <bool T, uint32_t S>
template <typename F, typename U>
LIBC_INLINE void Port<T, S>::send_and_recv(F fill, U use) {
  // The order is the protocol: publish our request, then wait for the reply.
  send(fill);
  recv(use);
}
|
|
|
|
|
|
|
|
/// Combines a receive and send operation into a single function. The \p work
|
|
|
|
/// function modifies the buffer in-place and the send is only used to initiate
|
|
|
|
/// the copy back.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <bool T, uint32_t S>
template <typename W>
LIBC_INLINE void Port<T, S>::recv_and_send(W work) {
  recv(work);
  // The \p work function already modified the buffer in place; this empty
  // send just flips ownership so the other side sees the result.
  send([](Buffer *) { /* no-op */ });
}
|
|
|
|
|
2023-05-19 11:17:42 -05:00
|
|
|
/// Helper routine to simplify the interface when sending from the GPU using
|
|
|
|
/// thread private pointers to the underlying value.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <bool T, uint32_t S>
LIBC_INLINE void Port<T, S>::send_n(const void *src, uint64_t size) {
  static_assert(is_process_gpu(), "Only valid when running on the GPU");
  // Adapt the thread-private scalar arguments to the pointer-based interface
  // of the general send_n overload.
  const void **src_ptr = &src;
  uint64_t *size_ptr = &size;
  send_n(src_ptr, size_ptr);
}
|
|
|
|
|
2023-04-13 21:27:51 -05:00
|
|
|
/// Sends an arbitrarily sized data buffer \p src across the shared channel in
|
|
|
|
/// multiples of the packet length.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <bool T, uint32_t S>
LIBC_INLINE void Port<T, S>::send_n(const void *const *src, uint64_t *size) {
  // The first packet carries each lane's total size in data[0] followed by as
  // many payload bytes as fit in the remainder of the buffer.
  uint64_t num_sends = 0;
  send([&](Buffer *buffer, uint32_t id) {
    reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
    // On the GPU every lane tracks its own size; on the CPU a single thread
    // services all slots, so take the maximum across lanes.
    num_sends = is_process_gpu() ? lane_value(size, id)
                                 : max(lane_value(size, id), num_sends);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    inline_memcpy(&buffer->data[1], lane_value(src, id), len);
  });
  // Stream the remaining bytes in full-buffer chunks. The loop stays
  // convergent: every lane in the packet's mask keeps iterating (via ballot)
  // until the lane with the most data is finished.
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.packet[index].header.mask;
  while (gpu::ballot(mask, idx < num_sends)) {
    send([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      // Lanes that have already sent all their data participate in the send
      // but copy nothing.
      if (idx < lane_value(size, id))
        inline_memcpy(buffer->data, advance(lane_value(src, id), idx), len);
    });
    idx += sizeof(Buffer::data);
  }
}
|
|
|
|
|
|
|
|
/// Receives an arbitrarily sized data buffer across the shared channel in
|
|
|
|
/// multiples of the packet length. The \p alloc function is called with the
|
|
|
|
/// size of the data so that we can initialize the size of the \p dst buffer.
|
2023-06-19 15:00:04 -05:00
|
|
|
template <bool T, uint32_t S>
template <typename A>
LIBC_INLINE void Port<T, S>::recv_n(void **dst, uint64_t *size, A &&alloc) {
  // The first packet carries each lane's total size in data[0] (mirroring
  // send_n), which we use to allocate the destination before copying the
  // bytes that shared the first buffer.
  uint64_t num_recvs = 0;
  recv([&](Buffer *buffer, uint32_t id) {
    lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
    lane_value(dst, id) =
        reinterpret_cast<uint8_t *>(alloc(lane_value(size, id)));
    // On the GPU every lane tracks its own size; on the CPU a single thread
    // services all slots, so take the maximum across lanes.
    num_recvs = is_process_gpu() ? lane_value(size, id)
                                 : max(lane_value(size, id), num_recvs);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    inline_memcpy(lane_value(dst, id), &buffer->data[1], len);
  });
  // Drain the remaining bytes in full-buffer chunks; the ballot keeps the
  // loop convergent until the lane with the most data has finished.
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.packet[index].header.mask;
  while (gpu::ballot(mask, idx < num_recvs)) {
    recv([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      // Lanes that are already complete participate in the recv but copy
      // nothing.
      if (idx < lane_value(size, id))
        inline_memcpy(advance(lane_value(dst, id), idx), buffer->data, len);
    });
    idx += sizeof(Buffer::data);
  }
}
|
|
|
|
|
2023-04-13 21:27:51 -05:00
|
|
|
/// Attempts to open a port to use as the client. The client can only open a
|
|
|
|
/// port if we find an index that is in a valid sending state. That is, there
|
|
|
|
/// are send operations pending that haven't been serviced on this port. Each
|
|
|
|
/// port instance uses an associated \p opcode to tell the server what to do.
|
2023-05-08 08:38:24 -05:00
|
|
|
template <uint16_t opcode>
[[clang::convergent]] LIBC_INLINE cpp::optional<Client::Port>
Client::try_open() {
  // Perform a naive linear scan for a port that can be opened to send data.
  for (uint64_t index = 0; index < this->port_count; ++index) {
    // Attempt to acquire the lock on this index.
    uint64_t lane_mask = gpu::get_lane_mask();
    if (!this->try_lock(lane_mask, index))
      continue;

    uint32_t in = this->load_inbox(index);
    uint32_t out = this->load_outbox(index);

    // Once we acquire the index we need to check if we are in a valid sending
    // state.
    if (this->buffer_unavailable(in, out)) {
      this->unlock(lane_mask, index);
      continue;
    }

    // A single lane writes the header; the sync makes it visible to the rest
    // of the warp before the port is handed out.
    if (is_first_lane(lane_mask)) {
      this->packet[index].header.opcode = opcode;
      this->packet[index].header.mask = lane_mask;
    }
    gpu::sync_lane(lane_mask);
    return Port(*this, lane_mask, index, out);
  }
  return cpp::nullopt;
}
|
|
|
|
|
2023-05-08 08:38:24 -05:00
|
|
|
/// Repeatedly scans for an available port, yielding between attempts, and
/// returns the first one successfully claimed.
template <uint16_t opcode> LIBC_INLINE Client::Port Client::open() {
  while (true) {
    cpp::optional<Client::Port> maybe_port = try_open<opcode>();
    if (maybe_port)
      return cpp::move(maybe_port.value());
    sleep_briefly();
  }
}
|
|
|
|
|
|
|
|
/// Attempts to open a port to use as the server. The server can only open a
|
|
|
|
/// port if it has a pending receive operation
|
2023-06-19 15:00:04 -05:00
|
|
|
template <uint32_t lane_size>
[[clang::convergent]] LIBC_INLINE
    cpp::optional<typename Server<lane_size>::Port>
    Server<lane_size>::try_open() {
  // Perform a naive linear scan for a port that has a pending request.
  for (uint64_t index = 0; index < this->port_count; ++index) {
    uint32_t in = this->load_inbox(index);
    uint32_t out = this->load_outbox(index);

    // The server is passive, if there is no work pending don't bother
    // opening a port.
    if (this->buffer_unavailable(in, out))
      continue;

    // Attempt to acquire the lock on this index.
    uint64_t lane_mask = gpu::get_lane_mask();
    if (!this->try_lock(lane_mask, index))
      continue;

    // Re-read the mailboxes under the lock: the state observed by the
    // unlocked peek above may have changed before the lock was taken.
    in = this->load_inbox(index);
    out = this->load_outbox(index);

    if (this->buffer_unavailable(in, out)) {
      this->unlock(lane_mask, index);
      continue;
    }

    return Port(*this, lane_mask, index, out);
  }
  return cpp::nullopt;
}
|
|
|
|
|
2023-06-19 15:00:04 -05:00
|
|
|
template <uint32_t lane_size>
|
|
|
|
LIBC_INLINE typename Server<lane_size>::Port Server<lane_size>::open() {
|
2023-04-13 21:27:51 -05:00
|
|
|
for (;;) {
|
2023-05-04 00:21:18 +01:00
|
|
|
if (cpp::optional<Server::Port> p = try_open())
|
2023-05-05 11:58:06 -05:00
|
|
|
return cpp::move(p.value());
|
2023-04-13 21:27:51 -05:00
|
|
|
sleep_briefly();
|
|
|
|
}
|
2023-03-10 16:48:53 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace rpc
|
|
|
|
} // namespace __llvm_libc
|
|
|
|
|
|
|
|
#endif
|