Merge tag 'for-6.14/io_uring-20250119' of git://git.kernel.dk/linux

Pull io_uring updates from Jens Axboe:
 "Not a lot in terms of features this time around, mostly just cleanups
  and code consolidation:

   - Support for PI metadata read/write via io_uring, with NVMe and
     SCSI covered (see the userspace sketch after this summary)

   - Cleanup the per-op structure caching, making it consistent across
     various command types

   - Consolidate the various user mapped features into a concept called
     regions, making the various users of that consistent

   - Various cleanups and fixes"
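
As a concrete illustration of the PI feature above, here is a minimal userspace sketch of attaching a protection-information buffer to a read. It assumes uapi/liburing headers new enough to carry the names from the PI series (struct io_uring_attr_pi, IORING_RW_ATTR_FLAG_PI, the attr_ptr/attr_type_mask SQE fields, IO_INTEGRITY_CHK_GUARD), and a device opened O_DIRECT whose blocks actually carry PI; error handling is elided.

/*
 * Hypothetical sketch: issue a read with PI metadata attached via the
 * new SQE attributes. Buffer sizes and device geometry are assumptions.
 */
#include <liburing.h>
#include <linux/fs.h>
#include <stdint.h>

static int read_with_pi(struct io_uring *ring, int fd,
			void *buf, unsigned int len,
			void *pi_buf, unsigned int pi_len)
{
	struct io_uring_attr_pi pi = {
		.flags	= IO_INTEGRITY_CHK_GUARD,	/* verify guard tags */
		.len	= pi_len,			/* bytes of PI metadata */
		.addr	= (uint64_t)(uintptr_t)pi_buf,	/* PI buffer */
	};
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_read(sqe, fd, buf, len, 0);
	/* the attributes ride in the last two u64 fields of the SQE */
	sqe->attr_ptr = (uint64_t)(uintptr_t)&pi;
	sqe->attr_type_mask = IORING_RW_ATTR_FLAG_PI;
	return io_uring_submit(ring);
}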

* tag 'for-6.14/io_uring-20250119' of git://git.kernel.dk/linux: (56 commits)
  io_uring/fdinfo: fix io_uring_show_fdinfo() misuse of ->d_iname
  io_uring: reuse io_should_terminate_tw() for cmds
  io_uring: Factor out a function to parse restrictions
  io_uring/rsrc: require cloned buffers to share accounting contexts
  io_uring: simplify the SQPOLL thread check when cancelling requests
  io_uring: expose read/write attribute capability
  io_uring/rw: don't gate retry on completion context
  io_uring/rw: handle -EAGAIN retry at IO completion time
  io_uring/rw: use io_rw_recycle() from cleanup path
  io_uring/rsrc: simplify the bvec iter count calculation
  io_uring: ensure io_queue_deferred() is out-of-line
  io_uring/rw: always clear ->bytes_done on io_async_rw setup
  io_uring/rw: use NULL for rw->free_iovec assigment
  io_uring/rw: don't mask in f_iocb_flags
  io_uring/msg_ring: Drop custom destructor
  io_uring: Move old async data allocation helper to header
  io_uring/rw: Allocate async data through helper
  io_uring/net: Allocate msghdr async data through helper
  io_uring/uring_cmd: Allocate async data through generic helper
  io_uring/poll: Allocate apoll with generic alloc_cache helper
  ...
Author: Linus Torvalds
Date:   2025-01-20 20:27:33 -08:00

32 changed files with 859 additions and 790 deletions


--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -16,26 +16,6 @@
 #include "rsrc.h"
 #include "uring_cmd.h"
 
-static struct io_uring_cmd_data *io_uring_async_get(struct io_kiocb *req)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-	struct io_uring_cmd_data *cache;
-
-	cache = io_alloc_cache_get(&ctx->uring_cache);
-	if (cache) {
-		cache->op_data = NULL;
-		req->flags |= REQ_F_ASYNC_DATA;
-		req->async_data = cache;
-		return cache;
-	}
-	if (!io_alloc_async_data(req)) {
-		cache = req->async_data;
-		cache->op_data = NULL;
-		return cache;
-	}
-	return NULL;
-}
-
 static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
@@ -130,7 +110,7 @@ static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 	unsigned int flags = IO_URING_F_COMPLETE_DEFER;
 
-	if (current->flags & (PF_EXITING | PF_KTHREAD))
+	if (io_should_terminate_tw())
 		flags |= IO_URING_F_TASK_DEAD;
 
 	/* task_work executor checks the deffered list completion */
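
The helper reused here centralizes exactly the open-coded check it replaces. A plausible shape of its definition, reconstructed from the removed line (the real one lives in io_uring/io_uring.h; treat this as a sketch):

/*
 * Terminate task_work if the submitting task is exiting (PF_EXITING),
 * or if the work was punted to the fallback kthread (PF_KTHREAD).
 */
static inline bool io_should_terminate_tw(void)
{
	return current->flags & (PF_KTHREAD | PF_EXITING);
}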
@@ -188,14 +168,22 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
 }
 EXPORT_SYMBOL_GPL(io_uring_cmd_done);
 
+static void io_uring_cmd_init_once(void *obj)
+{
+	struct io_uring_cmd_data *data = obj;
+
+	data->op_data = NULL;
+}
+
 static int io_uring_cmd_prep_setup(struct io_kiocb *req,
 				   const struct io_uring_sqe *sqe)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 	struct io_uring_cmd_data *cache;
 
-	cache = io_uring_async_get(req);
-	if (unlikely(!cache))
+	cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req,
+					  io_uring_cmd_init_once);
+	if (!cache)
 		return -ENOMEM;
 
 	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
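
The generic helper called in this hunk replaces the io_uring_async_get() removed in the first hunk. A sketch of the behavior it needs to preserve, reconstructed from that removed code; the elem_size field, the kmalloc fallback, and the exact init_once policy are assumptions, while io_alloc_cache_get(), REQ_F_ASYNC_DATA, and the overall shape come straight from the removed function:

/*
 * Sketch: grab a recycled object from the per-ring alloc cache, or
 * fall back to a fresh allocation (initializing it once), then attach
 * it to the request as its async data.
 */
static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
					      struct io_kiocb *req,
					      void (*init_once)(void *obj))
{
	void *data = io_alloc_cache_get(cache);

	if (!data) {
		data = kmalloc(cache->elem_size, GFP_KERNEL); /* field name assumed */
		if (!data)
			return NULL;
		if (init_once)
			init_once(data);
	}
	req->flags |= REQ_F_ASYNC_DATA;
	req->async_data = data;
	return data;
}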