// FdTable: file descriptor table for io_uring.
  FdTable(io_uring &ring) : ring_(ring) {}

  FdTable(const FdTable &) = delete;
  FdTable &operator=(const FdTable &) = delete;
  FdTable(FdTable &&) = delete;
  FdTable &operator=(FdTable &&) = delete;

  int init(size_t capacity) {
    return io_uring_register_files_sparse(&ring_, capacity);
  }

  int destroy() { return io_uring_unregister_files(&ring_); }

  int update(unsigned index_base, const int *fds, unsigned nr_fds) {
    return io_uring_register_files_update(&ring_, index_base, fds, nr_fds);
  }

  template <typename Func>
  void set_fd_accepter(Func &&accepter) {
    fd_accepter_ = std::forward<Func>(accepter);
  }

  int set_file_alloc_range(unsigned offset, unsigned size) {
    return io_uring_register_file_alloc_range(&ring_, offset, size);
  }

  io_uring &ring_;
  std::function<void(int32_t)> fd_accepter_ = nullptr;
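
// Usage sketch (illustrative, not part of the original header): driving the
// FdTable API shown above. The slot count and the file descriptor are
// placeholders, and error handling is reduced to return codes.
inline int install_listener(FdTable &table, int listen_fd) {
  // Create a sparse direct-descriptor table with 64 empty slots.
  int r = table.init(64);
  if (r < 0) return r;

  // Install the listening socket into slot 0; the other slots stay sparse.
  int fds[1] = {listen_fd};
  return table.update(/*index_base=*/0, fds, /*nr_fds=*/1);
}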

// BufferTable: registered (fixed) buffer table for io_uring.
  BufferTable(io_uring &ring) : ring_(ring) {}

  BufferTable(const BufferTable &) = delete;
  BufferTable &operator=(const BufferTable &) = delete;
  BufferTable(BufferTable &&) = delete;
  BufferTable &operator=(BufferTable &&) = delete;

  int init(size_t capacity) {
    int r = io_uring_register_buffers_sparse(&ring_, capacity);
    // ...
    return r;
  }

  int destroy() {
    initialized_ = false;
    return io_uring_unregister_buffers(&ring_);
  }

  int update(unsigned index_base, const iovec *vecs, unsigned nr_vecs) {
    return io_uring_register_buffers_update_tag(&ring_, index_base, vecs,
                                                /*tags=*/nullptr, nr_vecs);
  }

#if !IO_URING_CHECK_VERSION(2, 10)
  int clone_buffers(BufferTable &src, unsigned int dst_off = 0,
                    unsigned int src_off = 0, unsigned int nr = 0) {
    auto *src_ring = &src.ring_;
    auto *dst_ring = &ring_;
    unsigned int flags = 0;
    // ...
    flags |= IORING_REGISTER_DST_REPLACE;
    // ...
    int r = __io_uring_clone_buffers_offset(dst_ring, src_ring, dst_off,
                                            src_off, nr, flags);
    // ...
    return r;
  }
#endif

  io_uring &ring_;
  bool initialized_ = false;
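
// Usage sketch (illustrative, not part of the original header): publishing a
// fixed buffer through the BufferTable API above. The slot count, slot index,
// and buffer are placeholders; alignment and error handling are kept minimal.
inline int register_one_buffer(BufferTable &table, void *buf, size_t len) {
  // Create a sparse buffer table with 16 empty slots.
  int r = table.init(16);
  if (r < 0) return r;

  // Publish the buffer in slot 0 so SQEs can reference it as a fixed buffer.
  iovec vec{};
  vec.iov_base = buf;
  vec.iov_len = len;
  return table.update(/*index_base=*/0, &vec, /*nr_vecs=*/1);
}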

// RingSettings: per-ring tuning and feature probing helpers.
  RingSettings(io_uring &ring) : ring_(ring) {}
  ~RingSettings() { io_uring_free_probe(probe_); }

  RingSettings(const RingSettings &) = delete;
  RingSettings &operator=(const RingSettings &) = delete;
  RingSettings(RingSettings &&) = delete;
  RingSettings &operator=(RingSettings &&) = delete;

  int apply_iowq_aff(size_t cpusz, const cpu_set_t *mask) {
    return io_uring_register_iowq_aff(&ring_, cpusz, mask);
  }

  int set_iowq_max_workers(unsigned int *values) {
    return io_uring_register_iowq_max_workers(&ring_, values);
  }

  io_uring_probe *get_probe() {
    probe_ = io_uring_get_probe_ring(&ring_);
    return probe_;
  }

#if !IO_URING_CHECK_VERSION(2, 6)
  int apply_napi(io_uring_napi *napi) { return io_uring_register_napi(&ring_, napi); }
  int remove_napi(io_uring_napi *napi = nullptr) {
    return io_uring_unregister_napi(&ring_, napi);
  }
#endif

#if !IO_URING_CHECK_VERSION(2, 8)
  int set_clock(io_uring_clock_register *clock_reg) {
    return io_uring_register_clock(&ring_, clock_reg);
  }
#endif

#if !IO_URING_CHECK_VERSION(2, 9)
  int set_rings_size(io_uring_params *params) { return io_uring_resize_rings(&ring_, params); }
#endif

#if !IO_URING_CHECK_VERSION(2, 10)
  int set_iowait(bool enable_iowait) { return io_uring_set_iowait(&ring_, enable_iowait); }
#endif

  io_uring &ring_;
  io_uring_probe *probe_ = nullptr;
  uint32_t features_ = 0;
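
// Usage sketch (illustrative, not part of the original header): pinning the
// ring's io-wq workers and capping their count through RingSettings. The CPU
// ids and worker limits are placeholders.
inline int tune_iowq(RingSettings &settings) {
  // Restrict io-wq worker threads to CPUs 0 and 1.
  cpu_set_t mask;
  CPU_ZERO(&mask);
  CPU_SET(0, &mask);
  CPU_SET(1, &mask);
  int r = settings.apply_iowq_aff(sizeof(mask), &mask);
  if (r < 0) return r;

  // values[0] caps bounded workers (e.g. regular file I/O),
  // values[1] caps unbounded workers (e.g. network I/O).
  unsigned int values[2] = {4, 16};
  return settings.set_iowq_max_workers(values);
}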

// Ring: owns the io_uring instance plus the fd table, buffer table, and settings above.
  ~Ring() { destroy(); }

  Ring(const Ring &) = delete;
  Ring &operator=(const Ring &) = delete;
  Ring(Ring &&) = delete;
  Ring &operator=(Ring &&) = delete;

  int init(unsigned int entries, io_uring_params *params,
           [[maybe_unused]] void *buf = nullptr,
           [[maybe_unused]] size_t buf_size = 0) {
    int r;
    assert(!initialized_);
#if !IO_URING_CHECK_VERSION(2, 5)
    // With IORING_SETUP_NO_MMAP the rings live in caller-provided memory.
    if (params->flags & IORING_SETUP_NO_MMAP) {
      r = io_uring_queue_init_mem(entries, &ring_, params, buf, buf_size);
    } else
#endif
    {
      r = io_uring_queue_init_params(entries, &ring_, params);
    }
    // ...
    settings_.features_ = params->features;
    sqpoll_mode_ = (params->flags & IORING_SETUP_SQPOLL) != 0;
    // ...
    return r;
  }

  void destroy() {
    // ...
    io_uring_queue_exit(&ring_);
    initialized_ = false;
  }
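
// Usage sketch (illustrative, not part of the original header): two-phase
// setup of a Ring constructed elsewhere. The entry count is a placeholder.
inline int make_ring(Ring &ring) {
  // Zeroed params: the kernel fills params.features on success, which init()
  // copies into RingSettings; adding IORING_SETUP_SQPOLL to params.flags
  // would also switch the ring into sqpoll_mode_.
  io_uring_params params{};
  return ring.init(/*entries=*/256, &params);
}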

  void submit() { io_uring_submit(&ring_); }

  template <typename Func>
  size_t reap_completions_wait(Func &&process_func) {
    size_t reaped = 0;
    unsigned head;
    io_uring_cqe *cqe;
    int r = io_uring_submit_and_wait(&ring_, 1);
    if (r >= 0) [[likely]] {
      // ...
    } else if (r == -EINTR) {
      // ...
    } else {
      throw make_system_error("io_uring_submit_and_wait", -r);
    }
    io_uring_for_each_cqe(&ring_, head, cqe) {
      process_func(cqe);
#if !IO_URING_CHECK_VERSION(2, 13)
      reaped += io_uring_cqe_nr(cqe);
#endif
    }
    io_uring_cq_advance(&ring_, reaped);
    return reaped;
  }

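// Usage sketch (illustrative, not part of the original header): draining
// completions with reap_completions_wait(). The callback signature, taking a
// raw io_uring_cqe pointer, is an assumption based on the loop above.
inline size_t drain_once(Ring &ring) {
  return ring.reap_completions_wait([](io_uring_cqe *cqe) {
    // cqe->user_data identifies the submitted operation, cqe->res its result
    // (a byte count or a negative errno).
    if (cqe->res < 0) {
      /* record the error for the operation keyed by cqe->user_data */
    }
  });
}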
  template <typename Func>
  size_t reap_completions(Func &&process_func) {
    size_t reaped = 0;
    unsigned head;
    io_uring_cqe *cqe;
    if (io_uring_peek_cqe(&ring_, &cqe) == 0) {
      io_uring_for_each_cqe(&ring_, head, cqe) {
        process_func(cqe);
#if !IO_URING_CHECK_VERSION(2, 13)
        reaped += io_uring_cqe_nr(cqe);
#endif
      }
      io_uring_cq_advance(&ring_, reaped);
    }
    return reaped;
  }

  void reserve_space(size_t n) {
    // ...
    space_left = io_uring_sq_space_left(&ring_);
    if (space_left >= n) {
      // ...
    }
  }

  io_uring *ring() { return &ring_; }

  FdTable &fd_table() { return fd_table_; }

  BufferTable &buffer_table() { return buffer_table_; }

  RingSettings &settings() { return settings_; }

  io_uring_sqe *get_sqe() { return get_sqe_<io_uring_get_sqe>(); }
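
// Usage sketch (illustrative, not part of the original header): acquiring SQEs
// through the accessors above and queueing a linked write + fsync pair.
// reserve_space(2) reserves room so both get_sqe() calls can be prepared back
// to back; fd, buffer, and length are placeholders.
inline void queue_linked_write_fsync(Ring &ring, int fd, const void *buf,
                                     unsigned len) {
  ring.reserve_space(2);

  io_uring_sqe *write_sqe = ring.get_sqe();
  io_uring_prep_write(write_sqe, fd, buf, len, /*offset=*/0);
  write_sqe->flags |= IOSQE_IO_LINK;  // run the fsync only after the write

  io_uring_sqe *fsync_sqe = ring.get_sqe();
  io_uring_prep_fsync(fsync_sqe, fd, /*fsync_flags=*/0);

  ring.submit();
}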

#if !IO_URING_CHECK_VERSION(2, 13)
  // Variant for rings set up with 128-byte SQEs.
  io_uring_sqe *get_sqe128() {
    if (ring_.flags & (IORING_SETUP_SQE128 | IORING_SETUP_SQE_MIXED))
      return get_sqe_<io_uring_get_sqe128>();
    panic_on("SQE128 is not enabled for this io_uring ring");
  }
#endif

  // Grab a free SQE; when the SQ is full, flush it (or wait on the SQPOLL thread).
  template <io_uring_sqe *(*get_sqe)(struct io_uring *)>
  io_uring_sqe *get_sqe_() {
    [[maybe_unused]] int r;
    io_uring_sqe *sqe;
    sqe = get_sqe(&ring_);
    // ...
    r = io_uring_submit(&ring_);
    // ...
    r = io_uring_sqring_wait(&ring_);
    // ...
  }

  bool initialized_ = false;
  bool sqpoll_mode_ = false;

  io_uring ring_;
  FdTable fd_table_{ring_};
  BufferTable buffer_table_{ring_};
  RingSettings settings_{ring_};

Brief descriptions of the members referenced above, grouped by class:

FdTable (file descriptor table for io_uring)
  int init(size_t capacity)
    Initialize the file descriptor table with the given capacity.
  int destroy()
    Destroy the file descriptor table.
  int update(unsigned index_base, const int *fds, unsigned nr_fds)
    Update the file descriptor table starting from the given index.
  void set_fd_accepter(Func &&accepter)
    Set the accepter function for incoming file descriptors.
  int set_file_alloc_range(unsigned offset, unsigned size)
    Set the file allocation range for the fd table.
  friend auto async_fixed_fd_send(FdTable &dst, int source_fd, int target_fd, unsigned int flags)
    See io_uring_prep_msg_ring_fd.

BufferTable
  int init(size_t capacity)
    Initialize the buffer table with the given capacity.
  int destroy()
    Destroy the buffer table.
  int update(unsigned index_base, const iovec *vecs, unsigned nr_vecs)
    Update the buffer table starting from the given index.
  int clone_buffers(BufferTable &src, unsigned int dst_off = 0, unsigned int src_off = 0, unsigned int nr = 0)
    Clone buffers from another BufferTable into this one.

RingSettings
  int apply_iowq_aff(size_t cpusz, const cpu_set_t *mask)
    Apply I/O worker queue affinity settings.
  int remove_iowq_aff()
    Remove I/O worker queue affinity settings.
  int set_iowq_max_workers(unsigned int *values)
    Set the maximum number of I/O workers.
  io_uring_probe *get_probe()
    Get the io_uring probe for the ring.
  uint32_t get_features() const
    Get the supported features of the ring.
  int apply_napi(io_uring_napi *napi)
    Apply NAPI settings to the io_uring instance.
  int remove_napi(io_uring_napi *napi = nullptr)
    Remove NAPI settings from the io_uring instance.
  int set_clock(io_uring_clock_register *clock_reg)
    Set the clock registration for the io_uring instance.
  int set_rings_size(io_uring_params *params)
    Resize the rings of the io_uring instance.
  int set_iowait(bool enable_iowait)
    Enable or disable iowait for the io_uring instance.