mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-26 22:24:09 +08:00
io_uring: use inflight_entry list for iopoll'ing
req->inflight_entry is used to track requests that grabbed files_struct. Let's share it with iopoll list, because the only iopoll'ed ops are reads and writes, which don't need a file table. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
540e32a085
commit
d21ffe7eca
@@ -651,6 +651,10 @@ struct io_kiocb {
 	struct list_head	link_list;
 
+	/*
+	 * 1. used with ctx->iopoll_list with reads/writes
+	 * 2. to track reqs with ->files (see io_op_def::file_table)
+	 */
 	struct list_head	inflight_entry;
 
 	struct percpu_ref	*fixed_file_refs;
@@ -1943,8 +1947,8 @@ static void io_iopoll_queue(struct list_head *again)
 	struct io_kiocb *req;
 
 	do {
-		req = list_first_entry(again, struct io_kiocb, list);
-		list_del(&req->list);
+		req = list_first_entry(again, struct io_kiocb, inflight_entry);
+		list_del(&req->inflight_entry);
 		if (!io_rw_reissue(req, -EAGAIN))
 			io_complete_rw_common(&req->rw.kiocb, -EAGAIN, NULL);
 	} while (!list_empty(again));
@@ -1967,13 +1971,13 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	while (!list_empty(done)) {
 		int cflags = 0;
 
-		req = list_first_entry(done, struct io_kiocb, list);
+		req = list_first_entry(done, struct io_kiocb, inflight_entry);
 		if (READ_ONCE(req->result) == -EAGAIN) {
 			req->iopoll_completed = 0;
-			list_move_tail(&req->list, &again);
+			list_move_tail(&req->inflight_entry, &again);
 			continue;
 		}
-		list_del(&req->list);
+		list_del(&req->inflight_entry);
 
 		if (req->flags & REQ_F_BUFFER_SELECTED)
 			cflags = io_put_kbuf(req);
@@ -2009,7 +2013,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	spin = !ctx->poll_multi_file && *nr_events < min;

 	ret = 0;
-	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, list) {
+	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
 		struct kiocb *kiocb = &req->rw.kiocb;

 		/*
@@ -2018,7 +2022,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		 * and complete those lists first, if we have entries there.
 		 */
 		if (READ_ONCE(req->iopoll_completed)) {
-			list_move_tail(&req->list, &done);
+			list_move_tail(&req->inflight_entry, &done);
 			continue;
 		}
 		if (!list_empty(&done))
@@ -2030,7 +2034,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,

 		/* iopoll may have completed current req */
 		if (READ_ONCE(req->iopoll_completed))
-			list_move_tail(&req->list, &done);
+			list_move_tail(&req->inflight_entry, &done);

 		if (ret && spin)
 			spin = false;
@@ -2297,7 +2301,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 		struct io_kiocb *list_req;

 		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
-						list);
+						inflight_entry);
 		if (list_req->file != req->file)
 			ctx->poll_multi_file = true;
 	}
@@ -2307,9 +2311,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 	 * it to the front so we find it first.
 	 */
 	if (READ_ONCE(req->iopoll_completed))
-		list_add(&req->list, &ctx->iopoll_list);
+		list_add(&req->inflight_entry, &ctx->iopoll_list);
 	else
-		list_add_tail(&req->list, &ctx->iopoll_list);
+		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);

 	if ((ctx->flags & IORING_SETUP_SQPOLL) &&
 	    wq_has_sleeper(&ctx->sqo_wait))
|
Loading…
Reference in New Issue
Block a user