io_uring/msg_ring: kill alloc_cache for io_kiocb allocations
A recent commit:

fc582cd26e ("io_uring/msg_ring: ensure io_kiocb freeing is deferred for RCU")

fixed an issue with not deferring the freeing of io_kiocb structs that
msg_ring allocates until after the current RCU grace period. But this only
covers requests that don't end up in the allocation cache. If a request
goes into the alloc cache, it can get reused before it is sane to do so.
A recent syzbot report would seem to indicate that there's something
there, however it may very well just be because of the KASAN poisoning
that the alloc_cache handles manually.

Rather than attempt to make the alloc_cache sane for that use case, just
drop the usage of the alloc_cache for msg_ring request payload data.

Fixes: 50cf5f3842 ("io_uring/msg_ring: add an alloc cache for io_kiocb entries")
Link: https://lore.kernel.org/io-uring/68cc2687.050a0220.139b6.0005.GAE@google.com/
Reported-by: syzbot+baa2e0f4e02df602583e@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit df8922afc3
parent 3539b1467e
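The hazard being closed here is the usual RCU one: a lookup running under
rcu_read_lock() may still be dereferencing the io_kiocb after the completion
side has released it. kfree_rcu() keeps the object alive until the grace
period has elapsed; handing it back to an alloc cache does not, so the next
allocation can recycle and overwrite it while a reader still holds a pointer.
A minimal sketch of the two release shapes, simplified from the hunks below
(kernel context assumed, not a standalone build; the msg_req_release_*
helper names are invented for illustration):

/* Deferred free: the object is only kfree()'d once the current RCU grace
 * period has ended, so concurrent RCU readers never see it recycled.
 */
static void msg_req_release_deferred(struct io_kiocb *req)
{
	kfree_rcu(req, rcu_head);
}

/* Cached release (pre-patch shape): the object goes straight back into the
 * per-ring cache and may be handed out again, and overwritten, before the
 * grace period ends. Only the cache-miss path defers the free.
 */
static void msg_req_release_cached(struct io_ring_ctx *ctx,
				   struct io_kiocb *req)
{
	if (spin_trylock(&ctx->msg_lock)) {
		if (io_alloc_cache_put(&ctx->msg_cache, req))
			req = NULL;	/* cached: immediately reusable */
		spin_unlock(&ctx->msg_lock);
	}
	if (req)
		kfree_rcu(req, rcu_head);
}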
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -420,9 +420,6 @@ struct io_ring_ctx {
 	struct list_head	defer_list;
 	unsigned		nr_drained;
 
-	struct io_alloc_cache	msg_cache;
-	spinlock_t		msg_lock;
-
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	struct list_head	napi_list;	/* track busy poll napi_id */
 	spinlock_t		napi_lock;	/* napi_list lock */
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -290,7 +290,6 @@ static void io_free_alloc_caches(struct io_ring_ctx *ctx)
 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
 	io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free);
-	io_alloc_cache_free(&ctx->msg_cache, kfree);
 	io_futex_cache_free(ctx);
 	io_rsrc_cache_free(ctx);
 }
@@ -337,9 +336,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX,
 				   sizeof(struct io_async_cmd),
 				   sizeof(struct io_async_cmd));
-	spin_lock_init(&ctx->msg_lock);
-	ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
-				   sizeof(struct io_kiocb), 0);
 	ret |= io_futex_cache_init(ctx);
 	ret |= io_rsrc_cache_init(ctx);
 	if (ret)
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -11,7 +11,6 @@
 #include "io_uring.h"
 #include "rsrc.h"
 #include "filetable.h"
-#include "alloc_cache.h"
 #include "msg_ring.h"
 
 /* All valid masks for MSG_RING */
@@ -76,12 +75,6 @@ static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
-	if (spin_trylock(&ctx->msg_lock)) {
-		if (io_alloc_cache_put(&ctx->msg_cache, req))
-			req = NULL;
-		spin_unlock(&ctx->msg_lock);
-	}
-	if (req)
-		kfree_rcu(req, rcu_head);
+	kfree_rcu(req, rcu_head);
 	percpu_ref_put(&ctx->refs);
 }
 
@@ -104,26 +97,13 @@ static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	return 0;
 }
 
-static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
-{
-	struct io_kiocb *req = NULL;
-
-	if (spin_trylock(&ctx->msg_lock)) {
-		req = io_alloc_cache_get(&ctx->msg_cache);
-		spin_unlock(&ctx->msg_lock);
-		if (req)
-			return req;
-	}
-	return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
-}
-
 static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
 			      struct io_msg *msg)
 {
 	struct io_kiocb *target;
 	u32 flags = 0;
 
-	target = io_msg_get_kiocb(target_ctx);
+	target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
 	if (unlikely(!target))
 		return -ENOMEM;
 
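Put together, the allocation and completion paths for a remotely posted
request end up roughly as below after this patch. This is a condensed sketch
assembled from the hunks above, not the verbatim tree: unrelated setup and
the actual hand-off to the target ring are elided behind comments.

/* Allocation side: every remote-posted request is a fresh, zeroed slab
 * object; no per-ring cache is consulted.
 */
static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
			      struct io_msg *msg)
{
	struct io_kiocb *target;

	target = kmem_cache_alloc(req_cachep,
				  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (unlikely(!target))
		return -ENOMEM;

	/* ... fill in user_data/res/flags and post to target_ctx (elided) ... */
	return 0;
}

/* Completion side: the request is always freed via RCU, so a concurrent
 * RCU reader sees a valid object until the grace period ends.
 */
static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
	kfree_rcu(req, rcu_head);
	percpu_ref_put(&ctx->refs);
}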