
io_uring: tie req->apoll to request lifetime

We manage these separately right now; just tie it to the request lifetime
and make it part of the usual REQ_F_NEED_CLEANUP logic.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Jens Axboe
Date:   2021-04-15 09:52:40 -06:00
Commit: 75652a30ff (parent 4e3d9ff905)
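The change in a nutshell: instead of every completion and cancel path freeing req->apoll by hand, the allocation marks the request (REQ_F_POLLED), and the one teardown path that io_req_needs_clean() feeds into, io_clean_op(), frees it together with the rest of the request. Below is a minimal userspace sketch of that ownership pattern, not kernel code; the names request_sketch, arm_poll() and clean_op() are invented for illustration and only mirror the shape of the io_uring change.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins, not the kernel's definitions. */
#define REQ_F_NEED_CLEANUP	(1U << 0)
#define REQ_F_POLLED		(1U << 1)

struct async_poll_sketch {
	void *double_poll;	/* second-level poll entry, may stay NULL */
};

struct request_sketch {
	unsigned int flags;
	struct async_poll_sketch *apoll;
};

/* Allocation site: mark the request instead of freeing at every exit path. */
static int arm_poll(struct request_sketch *req)
{
	req->apoll = calloc(1, sizeof(*req->apoll));
	if (!req->apoll)
		return -1;
	req->flags |= REQ_F_POLLED;
	return 0;
}

/* Central teardown, in the spirit of io_clean_op(): the flag says what to free. */
static void clean_op(struct request_sketch *req)
{
	if ((req->flags & REQ_F_POLLED) && req->apoll) {
		free(req->apoll->double_poll);
		free(req->apoll);
		req->apoll = NULL;
	}
}

int main(void)
{
	struct request_sketch req = { 0 };

	if (arm_poll(&req) == 0)
		printf("armed, apoll=%p\n", (void *)req.apoll);
	clean_op(&req);		/* single point of cleanup for every path */
	printf("cleaned, apoll=%p\n", (void *)req.apoll);
	return 0;
}

The point of the pattern is that new exit paths cannot forget the kfree pair: whoever finishes the request funnels into the common cleanup, and the flag tells it whether there is anything to free.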


@@ -1600,7 +1600,8 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 static inline bool io_req_needs_clean(struct io_kiocb *req)
 {
-	return req->flags & (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP);
+	return req->flags & (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP |
+				REQ_F_POLLED);
 }
 
 static void io_req_complete_state(struct io_kiocb *req, long res,
@@ -5038,9 +5039,6 @@ static void io_async_task_func(struct callback_head *cb)
 		__io_req_task_submit(req);
 	else
 		io_req_complete_failed(req, -ECANCELED);
-
-	kfree(apoll->double_poll);
-	kfree(apoll);
 }
 
 static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -5156,8 +5154,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 	if (ret || ipt.error) {
 		io_poll_remove_double(req);
 		spin_unlock_irq(&ctx->completion_lock);
-		kfree(apoll->double_poll);
-		kfree(apoll);
 		return false;
 	}
 	spin_unlock_irq(&ctx->completion_lock);
@@ -5195,12 +5191,8 @@ static bool io_poll_remove_waitqs(struct io_kiocb *req)
 	do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
 
 	if (req->opcode != IORING_OP_POLL_ADD && do_complete) {
-		struct async_poll *apoll = req->apoll;
-
 		/* non-poll requests have submit ref still */
 		req_ref_put(req);
-		kfree(apoll->double_poll);
-		kfree(apoll);
 	}
 	return do_complete;
 }
@@ -6054,6 +6046,11 @@ static void io_clean_op(struct io_kiocb *req)
 		}
 		req->flags &= ~REQ_F_NEED_CLEANUP;
 	}
+	if ((req->flags & REQ_F_POLLED) && req->apoll) {
+		kfree(req->apoll->double_poll);
+		kfree(req->apoll);
+		req->apoll = NULL;
+	}
 }
 
 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)