Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git (synced 2026-04-18 03:23:53 -04:00)
io_uring: split out task work code into tw.c
Move the task work handling code out of io_uring.c into a new tw.c file.
This includes the local work, normal work, and fallback work handling
infrastructure. The associated tw.h header contains io_should_terminate_tw()
as a static inline helper, along with the necessary function declarations.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
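For orientation, below is a minimal sketch of how the new io_uring/tw.h described above might be laid out. It is an illustration only, assembled from the commit message and the declarations dropped in the diff further down, not the actual kernel header: the IOU_TW_H guard name, the includes, and the signature and body of io_should_terminate_tw() are assumptions; the other declarations are copied verbatim from the diff.

/* Hypothetical sketch of io_uring/tw.h -- not the actual kernel header. */
#ifndef IOU_TW_H
#define IOU_TW_H

#include <linux/sched.h>
#include <linux/llist.h>

struct io_kiocb;
struct io_uring_task;

/*
 * Task work entry points now implemented in tw.c; these declarations are
 * the ones removed from the io_uring header in the diff below.
 */
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count,
                                     unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
                                      unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);

/* Convenience wrapper, also moved out of the main header in the diff below. */
static inline void io_req_task_work_add(struct io_kiocb *req)
{
        __io_req_task_work_add(req, 0);
}

/*
 * Stop processing task_work once the task is exiting, or when the work is
 * being run from the kthread-based fallback path. The commit message only
 * says this helper lives in tw.h as a static inline; the signature and body
 * shown here are assumptions for illustration.
 */
static inline bool io_should_terminate_tw(void)
{
        return current->flags & (PF_KTHREAD | PF_EXITING);
}

#endif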
@@ -10,6 +10,7 @@
#include "alloc_cache.h"
#include "io-wq.h"
#include "slist.h"
#include "tw.h"
#include "opdef.h"

#ifndef CREATE_TRACE_POINTS
@@ -88,6 +89,8 @@ struct io_ctx_config {
                        IOSQE_BUFFER_SELECT |\
                        IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

/*
 * Complaint timeout for io_uring cancelation exits, and for io-wq exit
 * worker waiting.
@@ -156,8 +159,6 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
int io_prepare_config(struct io_ctx_config *config);

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int io_run_local_work(struct io_ring_ctx *ctx, int min_events, int max_events);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
@@ -171,15 +172,10 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
                               unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_drop_tctx_refs(struct task_struct *task);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
@@ -232,11 +228,6 @@ static inline bool io_is_compat(struct io_ring_ctx *ctx)
        return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat);
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
        __io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
        if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
@@ -461,59 +452,6 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
        return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
        bool ret = false;

        /*
         * Always check-and-clear the task_work notification signal. With how
         * signaling works for task_work, we can find it set with nothing to
         * run. We need to clear it for that case, like get_signal() does.
         */
        if (test_thread_flag(TIF_NOTIFY_SIGNAL))
                clear_notify_signal();
        /*
         * PF_IO_WORKER never returns to userspace, so check here if we have
         * notify work that needs processing.
         */
        if (current->flags & PF_IO_WORKER) {
                if (test_thread_flag(TIF_NOTIFY_RESUME)) {
                        __set_current_state(TASK_RUNNING);
                        resume_user_mode_work(NULL);
                }
                if (current->io_uring) {
                        unsigned int count = 0;

                        __set_current_state(TASK_RUNNING);
                        tctx_task_work_run(current->io_uring, UINT_MAX, &count);
                        if (count)
                                ret = true;
                }
        }
        if (task_work_pending(current)) {
                __set_current_state(TASK_RUNNING);
                task_work_run();
                ret = true;
        }

        return ret;
}

static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
        return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
        return task_work_pending(current) || io_local_work_pending(ctx);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
        lockdep_assert_held(&ctx->uring_lock);
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
@@ -571,17 +509,6 @@ static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
        return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
        return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
        return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
                      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
        io_req_set_res(req, res, 0);