io_uring/eventfd: dedup signalling helpers

Consolidate io_eventfd_flush_signal() and io_eventfd_signal() into a
single helper. There is not much of a difference for now, but it
prepares the code for the following changes.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/5beecd4da65d8d2d83df499196f84b329387f6a2.1745493845.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Pavel Begunkov, 2025-04-24 12:31:16 +01:00, committed by Jens Axboe
parent 76f1cc98b2
commit 62f666df76
3 changed files with 12 additions and 21 deletions
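
For reference, the consolidated helper as it reads after this patch,
assembled from the two eventfd.c hunks below (the middle of the kernel
comment falls between the hunks and is abbreviated here):

void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event)
{
	bool skip = false, put_ref = true;
	struct io_ev_fd *ev_fd;

	ev_fd = io_eventfd_grab(ctx);
	if (!ev_fd)
		return;

	if (cqe_event) {
		/*
		 * Eventfd should only get triggered when at least one
		 * event has been posted (full comment in the hunk below).
		 */
		spin_lock(&ctx->completion_lock);
		skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
		ev_fd->last_cq_tail = ctx->cached_cq_tail;
		spin_unlock(&ctx->completion_lock);
	}

	if (!skip)
		put_ref = __io_eventfd_signal(ev_fd);

	io_eventfd_release(ev_fd, put_ref);
}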

--- a/io_uring/eventfd.c
+++ b/io_uring/eventfd.c
@@ -112,23 +112,16 @@ static struct io_ev_fd *io_eventfd_grab(struct io_ring_ctx *ctx)
 	return NULL;
 }
 
-void io_eventfd_signal(struct io_ring_ctx *ctx)
+void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event)
 {
+	bool skip = false, put_ref = true;
 	struct io_ev_fd *ev_fd;
 
 	ev_fd = io_eventfd_grab(ctx);
-	if (ev_fd)
-		io_eventfd_release(ev_fd, __io_eventfd_signal(ev_fd));
-}
-
-void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
-{
-	struct io_ev_fd *ev_fd;
-
-	ev_fd = io_eventfd_grab(ctx);
-	if (ev_fd) {
-		bool skip, put_ref = true;
+	if (!ev_fd)
+		return;
 
+	if (cqe_event) {
 		/*
 		 * Eventfd should only get triggered when at least one event
 		 * has been posted. Some applications rely on the eventfd
@@ -142,13 +135,12 @@ void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
 		skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
 		ev_fd->last_cq_tail = ctx->cached_cq_tail;
 		spin_unlock(&ctx->completion_lock);
+	}
 
-		if (!skip)
-			put_ref = __io_eventfd_signal(ev_fd);
+	if (!skip)
+		put_ref = __io_eventfd_signal(ev_fd);
 
-		io_eventfd_release(ev_fd, put_ref);
-	}
+	io_eventfd_release(ev_fd, put_ref);
 }
 
 int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
 			unsigned int eventfd_async)

--- a/io_uring/eventfd.h
+++ b/io_uring/eventfd.h
@@ -4,5 +4,4 @@ int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
 			unsigned int eventfd_async);
 int io_eventfd_unregister(struct io_ring_ctx *ctx);
 
-void io_eventfd_flush_signal(struct io_ring_ctx *ctx);
-void io_eventfd_signal(struct io_ring_ctx *ctx);
+void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event);

--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -584,7 +584,7 @@ void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 	if (ctx->drain_active)
 		io_queue_deferred(ctx);
 	if (ctx->has_evfd)
-		io_eventfd_flush_signal(ctx);
+		io_eventfd_signal(ctx, true);
 }
 
 static inline void __io_cq_lock(struct io_ring_ctx *ctx)
@@ -1199,7 +1199,7 @@ static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
 			atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
 		if (ctx->has_evfd)
-			io_eventfd_signal(ctx);
+			io_eventfd_signal(ctx, false);
 	}
 
 	nr_wait = atomic_read(&ctx->cq_wait_nr);
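
As a usage illustration: the cqe_event flag preserves the guarantee the
in-kernel comment describes, that on the CQ-flush path the eventfd count
only moves when the CQ tail has advanced. Below is a minimal userspace
sketch of an application relying on that guarantee, assuming liburing;
setup_evfd() and wait_for_cqe() are hypothetical helpers, not part of
this patch:

#include <liburing.h>
#include <sys/eventfd.h>
#include <stdint.h>
#include <unistd.h>

/* Setup: create an eventfd and register it with the ring. */
static int setup_evfd(struct io_uring *ring)
{
	int efd = eventfd(0, 0);

	if (efd < 0 || io_uring_register_eventfd(ring, efd) < 0)
		return -1;
	return efd;
}

/* Block until the kernel signals the registered eventfd. Because the
 * flush path (cqe_event == true after this patch) skips signalling
 * unless the CQ tail advanced, a successful read() here implies at
 * least one new completion is waiting in the ring. */
static int wait_for_cqe(struct io_uring *ring, int efd)
{
	uint64_t ticks;

	if (read(efd, &ticks, sizeof(ticks)) != sizeof(ticks))
		return -1;
	return 0;
}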