From a8cf95f93610eb8282f8b6d0117ba78b74588d6b Mon Sep 17 00:00:00 2001
From: Pavel Begunkov <asml.silence@gmail.com>
Date: Fri, 2 Dec 2022 17:47:25 +0000
Subject: [PATCH] io_uring: fix overflow handling regression

Because the single task locking series got reordered ahead of the
timeout and completion lock changes, two hunks inadvertently ended up
using __io_fill_cqe_req() rather than io_fill_cqe_req(). This meant
that we dropped overflow handling in those two spots. Reinstate the
correct CQE filling helper.

Fixes: f66f73421f0a ("io_uring: skip spinlocking for ->task_complete")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/io_uring.c | 2 +-
 io_uring/rw.c       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fc64072c53eb..4601e48a173d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -927,7 +927,7 @@ static void __io_req_complete_post(struct io_kiocb *req)
 
 	io_cq_lock(ctx);
 	if (!(req->flags & REQ_F_CQE_SKIP))
-		__io_fill_cqe_req(ctx, req);
+		io_fill_cqe_req(ctx, req);
 
 	/*
 	 * If we're the last reference to this request, add to our locked
diff --git a/io_uring/rw.c b/io_uring/rw.c
index b9cac5706e8d..8227af2e1c0f 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1062,7 +1062,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 			continue;
 
 		req->cqe.flags = io_put_kbuf(req, 0);
-		__io_fill_cqe_req(req->ctx, req);
+		io_fill_cqe_req(req->ctx, req);
	}

	if (unlikely(!nr_events))
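
Note on the two helpers: the distinction the patch restores is that
io_fill_cqe_req() wraps __io_fill_cqe_req() with a fallback to the CQE
overflow path when the completion ring is full, while the bare
double-underscore variant just fails to post the completion. A minimal
sketch of that relationship, simplified from the io_uring helpers of
that kernel version (not verbatim source; io_req_cqe_overflow() is
assumed to be the overflow path):

	/*
	 * Sketch only: __io_fill_cqe_req() attempts to claim a free CQE
	 * slot in the ring and fill it; the wrapper falls back to queuing
	 * the completion on the overflow list when the ring is full, so
	 * the CQE is deferred rather than silently dropped.
	 */
	static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					   struct io_kiocb *req)
	{
		if (likely(__io_fill_cqe_req(ctx, req)))
			return true;
		/* ring full: stash the CQE on the overflow list */
		return io_req_cqe_overflow(req);
	}

With that in mind, the two hunks above simply switch the completion-post
and iopoll paths back to the overflow-aware wrapper.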