io_uring: track link's head and tail during submit
Explicitly save not only a link's head in io_submit_sqe[s]() but the tail as well. That's in preparation for keeping linked requests in a singly linked list.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 018043be1f
commit 863e05604a
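As a rough illustration of why the submit path wants both ends of the chain, here is a minimal userspace sketch, not kernel code: struct req, its link_next field, and link_add() are stand-ins invented for this example, with only struct io_submit_link's head/last shape taken from the patch. With just the head recorded, appending to a singly linked chain would mean walking to the tail each time; caching the last request makes every append O(1), which is what the singly-linked-list conversion this patch prepares for can rely on.

/* Minimal sketch of the head/last tracking pattern, under the assumptions
 * stated above. At this point in the kernel the chain still lives on a
 * doubly linked list_head; link->last is merely recorded for later use.
 */
#include <stddef.h>
#include <stdio.h>

struct req {
	int id;
	struct req *link_next;	/* next request in the link chain */
};

struct submit_link {
	struct req *head;	/* first request of the current chain */
	struct req *last;	/* cached tail, so appends never walk the chain */
};

/* Append req to the chain in O(1) by using the cached tail. */
static void link_add(struct submit_link *link, struct req *req)
{
	req->link_next = NULL;
	if (!link->head) {
		link->head = req;
		link->last = req;
	} else {
		link->last->link_next = req;
		link->last = req;
	}
}

int main(void)
{
	struct submit_link link = { .head = NULL, .last = NULL };
	struct req reqs[3];

	for (int i = 0; i < 3; i++) {
		reqs[i].id = i;
		link_add(&link, &reqs[i]);
	}

	/* "Queue the head": walk the chain once, in submission order. */
	for (struct req *r = link.head; r; r = r->link_next)
		printf("submit req %d\n", r->id);

	return 0;
}

In the diff below the chain is still built with list_add_tail() on head->link_list, so link->last is only bookkeeping here; the O(1)-append payoff comes when linked requests later move to a singly linked list, as the commit message notes.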
@@ -6536,8 +6536,13 @@ static inline void io_queue_link_head(struct io_kiocb *req,
 		io_queue_sqe(req, NULL, cs);
 }
 
+struct io_submit_link {
+	struct io_kiocb *head;
+	struct io_kiocb *last;
+};
+
 static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-			 struct io_kiocb **link, struct io_comp_state *cs)
+			 struct io_submit_link *link, struct io_comp_state *cs)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
@@ -6549,8 +6554,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	 * submitted sync once the chain is complete. If none of those
 	 * conditions are true (normal request), then just queue it.
 	 */
-	if (*link) {
-		struct io_kiocb *head = *link;
+	if (link->head) {
+		struct io_kiocb *head = link->head;
 
 		/*
 		 * Taking sequential execution of a link, draining both sides
@@ -6571,11 +6576,12 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		}
 		trace_io_uring_link(ctx, req, head);
 		list_add_tail(&req->link_list, &head->link_list);
+		link->last = req;
 
 		/* last request of a link, enqueue the link */
 		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
 			io_queue_link_head(head, cs);
-			*link = NULL;
+			link->head = NULL;
 		}
 	} else {
 		if (unlikely(ctx->drain_next)) {
@@ -6589,7 +6595,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			ret = io_req_defer_prep(req, sqe);
 			if (unlikely(ret))
 				req->flags |= REQ_F_FAIL_LINK;
-			*link = req;
+			link->head = req;
+			link->last = req;
 		} else {
 			io_queue_sqe(req, sqe, cs);
 		}
@@ -6769,7 +6776,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 {
 	struct io_submit_state state;
-	struct io_kiocb *link = NULL;
+	struct io_submit_link link;
 	int i, submitted = 0;
 
 	/* if we have a backlog and couldn't flush it all, return BUSY */
@@ -6789,6 +6796,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		refcount_add(nr, &current->usage);
 
 	io_submit_state_start(&state, ctx, nr);
+	link.head = NULL;
 
 	for (i = 0; i < nr; i++) {
 		const struct io_uring_sqe *sqe;
@@ -6834,8 +6842,8 @@ fail_req:
 		percpu_counter_sub(&tctx->inflight, unused);
 		put_task_struct_many(current, unused);
 	}
-	if (link)
-		io_queue_link_head(link, &state.comp);
+	if (link.head)
+		io_queue_link_head(link.head, &state.comp);
 	io_submit_state_end(&state);
 
 	/* Commit SQ ring head once we've consumed and submitted all SQEs */