io_uring: add async data clear/free helpers

Futex recently had an issue where ->async_data and REQ_F_ASYNC_DATA were
mishandled. To avoid similar issues in the future, add a set of helpers
that either clear, or clear and free, the async data assigned to a
struct io_kiocb.

Convert the existing open-coded handling to use the new helpers. No
functional changes are intended in this patch.
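
As a rough before/after sketch (illustrative only, condensed from the call
sites converted below), a cleanup path goes from the open-coded dance to a
single helper call:

	/* before: open-coded free and clear of the async data */
	kfree(req->async_data);
	req->async_data = NULL;
	req->flags &= ~REQ_F_ASYNC_DATA;

	/* after: free and clear in one call */
	io_req_async_data_free(req);

	/*
	 * or, when the data was recycled into a cache rather than freed,
	 * just clear, optionally dropping extra flags at the same time
	 */
	io_req_async_data_clear(req, REQ_F_NEED_CLEANUP);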

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Jens Axboe <axboe@kernel.dk>
Date:   2025-08-22 08:19:56 -06:00
Commit: 4c0b26e23c (parent: c986f7586b)

6 changed files with 23 additions and 22 deletions

io_uring/futex.c

@@ -43,7 +43,6 @@ void io_futex_cache_free(struct io_ring_ctx *ctx)
 static void __io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)
 {
-	req->async_data = NULL;
 	hlist_del_init(&req->hash_node);
 	io_req_task_complete(req, tw);
 }
@@ -54,6 +53,7 @@ static void io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)
 	io_tw_lock(ctx, tw);
 	io_cache_free(&ctx->futex_cache, req->async_data);
+	io_req_async_data_clear(req, 0);
 	__io_futex_complete(req, tw);
 }
@@ -72,8 +72,7 @@ static void io_futexv_complete(struct io_kiocb *req, io_tw_token_t tw)
 			io_req_set_res(req, res, 0);
 	}
 
-	kfree(req->async_data);
-	req->flags &= ~REQ_F_ASYNC_DATA;
+	io_req_async_data_free(req);
 	__io_futex_complete(req, tw);
 }
@@ -232,9 +231,7 @@ int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags)
 		io_ring_submit_unlock(ctx, issue_flags);
 		req_set_fail(req);
 		io_req_set_res(req, ret, 0);
-		kfree(futexv);
-		req->async_data = NULL;
-		req->flags &= ~REQ_F_ASYNC_DATA;
+		io_req_async_data_free(req);
 		return IOU_COMPLETE;
 	}
@@ -310,9 +307,7 @@ done:
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_set_res(req, ret, 0);
-	req->async_data = NULL;
-	req->flags &= ~REQ_F_ASYNC_DATA;
-	kfree(ifd);
+	io_req_async_data_free(req);
 	return IOU_COMPLETE;
 }

io_uring/io_uring.h

@@ -281,6 +281,19 @@ static inline bool req_has_async_data(struct io_kiocb *req)
 	return req->flags & REQ_F_ASYNC_DATA;
 }
 
+static inline void io_req_async_data_clear(struct io_kiocb *req,
+					   io_req_flags_t extra_flags)
+{
+	req->flags &= ~(REQ_F_ASYNC_DATA|extra_flags);
+	req->async_data = NULL;
+}
+
+static inline void io_req_async_data_free(struct io_kiocb *req)
+{
+	kfree(req->async_data);
+	io_req_async_data_clear(req, 0);
+}
+
 static inline void io_put_file(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)

io_uring/net.c

@@ -178,10 +178,8 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
 	if (hdr->vec.nr > IO_VEC_CACHE_SOFT_CAP)
 		io_vec_free(&hdr->vec);
 
-	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
-		req->async_data = NULL;
-		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
-	}
+	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr))
+		io_req_async_data_clear(req, REQ_F_NEED_CLEANUP);
 }
 
 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)

io_uring/rw.c

@@ -154,10 +154,8 @@ static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
 	if (rw->vec.nr > IO_VEC_CACHE_SOFT_CAP)
 		io_vec_free(&rw->vec);
 
-	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
-		req->async_data = NULL;
-		req->flags &= ~REQ_F_ASYNC_DATA;
-	}
+	if (io_alloc_cache_put(&req->ctx->rw_cache, rw))
+		io_req_async_data_clear(req, 0);
 }
 
 static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)

io_uring/uring_cmd.c

@@ -37,8 +37,7 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 	if (io_alloc_cache_put(&req->ctx->cmd_cache, ac)) {
 		ioucmd->sqe = NULL;
-		req->async_data = NULL;
-		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
+		io_req_async_data_clear(req, REQ_F_NEED_CLEANUP);
 	}
 }

io_uring/waitid.c

@@ -37,9 +37,7 @@ static void io_waitid_free(struct io_kiocb *req)
 	struct io_waitid_async *iwa = req->async_data;
 
 	put_pid(iwa->wo.wo_pid);
-	kfree(req->async_data);
-	req->async_data = NULL;
-	req->flags &= ~REQ_F_ASYNC_DATA;
+	io_req_async_data_free(req);
 }
 
 static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo)