io_uring: fix typos and comment wording

Corrected spelling mistakes in comments ("reuqests" -> "requests",
"noifications" -> "notifications", "seperately" -> "separately").

Fixed a small grammar issue ("then" -> "than").
Updated "flag" -> "flags" in fdinfo.c

Signed-off-by: Alok Tiwari <alok.a.tiwari@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Alok Tiwari <alok.a.tiwari@oracle.com>
Date: 2025-11-04 21:01:09 -08:00
Committed-by: Jens Axboe <axboe@kernel.dk>
Commit: 59f44afbe8
Parent: 4b25b75c30

4 changed files with 7 additions and 7 deletions

--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c

@@ -156,7 +156,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 		cqe = &r->cqes[(cq_head & cq_mask)];
 		if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32)
 			cqe32 = true;
-		seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
+		seq_printf(m, "%5u: user_data:%llu, res:%d, flags:%x",
 			   cq_head & cq_mask, cqe->user_data, cqe->res,
 			   cqe->flags);
 		if (cqe32)

--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c

@@ -882,7 +882,7 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags
 }
 
 /*
- * Must be called from inline task_work so we now a flush will happen later,
+ * Must be called from inline task_work so we know a flush will happen later,
  * and obviously with ctx->uring_lock held (tw always has that).
  */
 void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
@@ -1209,7 +1209,7 @@ static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 	BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);
 
 	/*
-	 * We don't know how many reuqests is there in the link and whether
+	 * We don't know how many requests there are in the link and whether
 	 * they can even be queued lazily, fall back to non-lazy.
 	 */
 	if (req->flags & IO_REQ_LINK_FLAGS)

--- a/io_uring/notif.c
+++ b/io_uring/notif.c

@@ -93,7 +93,7 @@ static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
 	prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
 	prev_notif = cmd_to_io_kiocb(prev_nd);
 
-	/* make sure all noifications can be finished in the same task_work */
+	/* make sure all notifications can be finished in the same task_work */
 	if (unlikely(notif->ctx != prev_notif->ctx ||
 		     notif->tctx != prev_notif->tctx))
 		return -EEXIST;

--- a/io_uring/rw.c
+++ b/io_uring/rw.c

@@ -186,7 +186,7 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
 	 * This is really a bug in the core code that does this, any issue
 	 * path should assume that a successful (or -EIOCBQUEUED) return can
 	 * mean that the underlying data can be gone at any time. But that
-	 * should be fixed seperately, and then this check could be killed.
+	 * should be fixed separately, and then this check could be killed.
 	 */
 	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
 		req->flags &= ~REQ_F_NEED_CLEANUP;
@@ -349,7 +349,7 @@ static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	/*
 	 * Have to do this validation here, as this is in io_read() rw->len
-	 * might have chanaged due to buffer selection
+	 * might have changed due to buffer selection
 	 */
 	return io_iov_buffer_select_prep(req);
 }
@@ -1020,7 +1020,7 @@ static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
 		iov_iter_restore(&io->iter, &io->iter_state);
 	} while (ret > 0);
 done:
-	/* it's faster to check here then delegate to kfree */
+	/* it's faster to check here than delegate to kfree */
 	return ret;
 }