io_uring: Enable KASAN for request cache

Every io_uring request is represented by struct io_kiocb, which is
cached locally by io_uring (not by SLAB/SLUB) on the list
submit_state.free_list. This patch enables KASAN for that free list.

The objects on this list come from a kmem_cache (created with
KMEM_CACHE()), but once allocated they are managed by io_uring itself.
This patch poisons an object while it sits unused on the free list and
unpoisons it when the object is allocated (removed from the list).

Touching a poisoned object while it is on the free list triggers a
KASAN warning.
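
The pattern, shown here as a minimal sketch (the names my_obj, my_cachep
and the two helpers are made up for illustration and are not part of the
patch): objects are allocated from a kmem_cache but parked on a private
singly-linked free list, so KASAN has to be told explicitly when an
object becomes "free" again and when it is handed back out.

#include <linux/kasan.h>
#include <linux/slab.h>

struct my_obj {
	struct my_obj *next;
	/* payload ... */
};

static struct kmem_cache *my_cachep;	/* e.g. created with KMEM_CACHE(my_obj, 0) */
static struct my_obj *my_free_list;

static void my_cache_put(struct my_obj *obj)
{
	/* Push onto the private free list, then mark the memory off limits. */
	obj->next = my_free_list;
	my_free_list = obj;
	kasan_poison_object_data(my_cachep, obj);
}

static struct my_obj *my_cache_get(void)
{
	struct my_obj *obj = my_free_list;

	if (!obj)
		return NULL;
	/* Unpoison before dereferencing any field inside the object. */
	kasan_unpoison_object_data(my_cachep, obj);
	my_free_list = obj->next;
	return obj;
}

io_uring's real cache stacks requests through req->comp_list (a struct
io_wq_work_node), but the poison/unpoison points are the same.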

Suggested-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Breno Leitao <leitao@debian.org>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit c1755c25a7 (parent b5d3ae202f)
Author:    Breno Leitao, 2023-01-18 07:56:30 -08:00
Committed: Jens Axboe
2 changed files, 10 insertions(+), 4 deletions(-)

io_uring/io_uring.c

@@ -151,7 +151,7 @@ static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
 static __cold void io_fallback_tw(struct io_uring_task *tctx);
 
-static struct kmem_cache *req_cachep;
+struct kmem_cache *req_cachep;
 
 struct sock *io_uring_get_socket(struct file *file)
 {
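
A note on the hunk above: io_alloc_req() in io_uring.h (changed below)
now passes req_cachep to kasan_unpoison_object_data(), so the cache
pointer can no longer have file-local linkage. Its definition stays in
io_uring.c and is simply paired with an extern declaration in the
header:

	/* io_uring/io_uring.c */
	struct kmem_cache *req_cachep;

	/* io_uring/io_uring.h */
	extern struct kmem_cache *req_cachep;
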
@@ -230,6 +230,7 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res)
 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
 {
 	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
+	kasan_poison_object_data(req_cachep, req);
 }
 
 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
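
With this hunk, a request is poisoned the moment it is returned to
ctx->submit_state.free_list, so any code path that keeps using a request
after handing it back to the cache now produces a KASAN report instead
of silently reading or corrupting cached memory. A hypothetical buggy
helper (not in the tree) illustrating the class of bug this catches:

static void buggy_path(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	io_req_add_to_cache(req, ctx);	/* req is poisoned here */
	req->flags = 0;			/* KASAN: access to poisoned object */
}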

io_uring/io_uring.h

@@ -4,6 +4,7 @@
 #include <linux/errno.h>
 #include <linux/lockdep.h>
 #include <linux/resume_user_mode.h>
+#include <linux/kasan.h>
 #include <linux/io_uring_types.h>
 #include <uapi/linux/eventpoll.h>
 #include "io-wq.h"
@@ -347,12 +348,16 @@ static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
 	return true;
 }
 
+extern struct kmem_cache *req_cachep;
+
 static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
-	struct io_wq_work_node *node;
+	struct io_kiocb *req;
 
-	node = wq_stack_extract(&ctx->submit_state.free_list);
-	return container_of(node, struct io_kiocb, comp_list);
+	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
+	kasan_unpoison_object_data(req_cachep, req);
+	wq_stack_extract(&ctx->submit_state.free_list);
+	return req;
 }
 
 static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
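
The reordering in io_alloc_req() above is deliberate. The request
pointer is first computed from free_list.next with container_of(), which
is pure pointer arithmetic and does not dereference the poisoned object;
the object is then unpoisoned, and only after that is wq_stack_extract()
allowed to follow req->comp_list.next, which lives inside the request.
For reference, wq_stack_extract() in io-wq.h looks roughly like this
(quoted from memory, so treat it as a sketch):

static inline struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
{
	struct io_wq_work_node *node = stack->next;

	stack->next = node->next;	/* reads memory inside the request */
	return node;
}

Calling it before kasan_unpoison_object_data() would make the allocation
path itself trip the poisoning this patch adds.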