io_uring: revert "io_uring: optimize submit_and_wait API"

There are cases where it isn't safe to block for submission, even
when the caller has also asked to wait for events. Revert the
previous optimization that allowed the final submission in a series
to block.
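
For context (illustrative aside, not part of the original commit message):
the affected path is the submit-and-wait form of io_uring_enter(), where a
single system call both submits SQEs and waits for completions. A minimal
userspace sketch of that pattern, assuming liburing is available, looks
roughly like this; with this revert the last submission in such a call is
always issued non-blocking again and punted to async context when it would
otherwise block.

	/*
	 * Illustrative only, not from this commit. Assumes liburing is
	 * installed; build with -luring. Error handling trimmed.
	 */
	#include <liburing.h>
	#include <stdio.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		/* Queue a single no-op request. */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);

		/*
		 * Submit and wait for one completion in one io_uring_enter()
		 * call. The reverted change let the kernel block inline for
		 * the final submission here; after the revert it is submitted
		 * non-blocking and punted to async context if needed.
		 */
		io_uring_submit_and_wait(&ring, 1);

		if (io_uring_wait_cqe(&ring, &cqe) == 0) {
			printf("nop completed, res=%d\n", cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}

		io_uring_queue_exit(&ring);
		return 0;
	}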

This reverts two commits:

bf7ec93c64
c576666863

Fixes: c576666863 ("io_uring: optimize submit_and_wait API")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe 2019-10-22 13:14:37 -06:00
parent 7d194c2100
commit bc808bced3

--- a/fs/io_uring.c
+++ b/fs/io_uring.c

@@ -2292,11 +2292,11 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
 }
 
 static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			  struct sqe_submit *s, bool force_nonblock)
+			  struct sqe_submit *s)
 {
 	int ret;
 
-	ret = __io_submit_sqe(ctx, req, s, force_nonblock);
+	ret = __io_submit_sqe(ctx, req, s, true);
 
 	/*
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
@@ -2343,7 +2343,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 }
 
 static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			struct sqe_submit *s, bool force_nonblock)
+			struct sqe_submit *s)
 {
 	int ret;
 
@@ -2356,18 +2356,17 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		return 0;
 	}
 
-	return __io_queue_sqe(ctx, req, s, force_nonblock);
+	return __io_queue_sqe(ctx, req, s);
 }
 
 static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			      struct sqe_submit *s, struct io_kiocb *shadow,
-			      bool force_nonblock)
+			      struct sqe_submit *s, struct io_kiocb *shadow)
 {
 	int ret;
 	int need_submit = false;
 
 	if (!shadow)
-		return io_queue_sqe(ctx, req, s, force_nonblock);
+		return io_queue_sqe(ctx, req, s);
 
 	/*
 	 * Mark the first IO in link list as DRAIN, let all the following
@@ -2396,7 +2395,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	spin_unlock_irq(&ctx->completion_lock);
 
 	if (need_submit)
-		return __io_queue_sqe(ctx, req, s, force_nonblock);
+		return __io_queue_sqe(ctx, req, s);
 
 	return 0;
 }
@@ -2404,8 +2403,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
 
 static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
-			  struct io_submit_state *state, struct io_kiocb **link,
-			  bool force_nonblock)
+			  struct io_submit_state *state, struct io_kiocb **link)
 {
 	struct io_uring_sqe *sqe_copy;
 	struct io_kiocb *req;
@@ -2458,7 +2456,7 @@ err:
 		INIT_LIST_HEAD(&req->link_list);
 		*link = req;
 	} else {
-		io_queue_sqe(ctx, req, s, force_nonblock);
+		io_queue_sqe(ctx, req, s);
 	}
 }
 
@@ -2562,8 +2560,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
 		 * that's the end of the chain. Submit the previous link.
 		 */
 		if (!prev_was_link && link) {
-			io_queue_link_head(ctx, link, &link->submit, shadow_req,
-						true);
+			io_queue_link_head(ctx, link, &link->submit, shadow_req);
 			link = NULL;
 			shadow_req = NULL;
 		}
@@ -2588,13 +2585,13 @@ out:
 			sqes[i].has_user = has_user;
 			sqes[i].needs_lock = true;
 			sqes[i].needs_fixed_file = true;
-			io_submit_sqe(ctx, &sqes[i], statep, &link, true);
+			io_submit_sqe(ctx, &sqes[i], statep, &link);
 			submitted++;
 		}
 	}
 
 	if (link)
-		io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
+		io_queue_link_head(ctx, link, &link->submit, shadow_req);
 	if (statep)
 		io_submit_state_end(&state);
 
@@ -2726,8 +2723,7 @@ static int io_sq_thread(void *data)
 	return 0;
 }
 
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
-			  bool block_for_last)
+static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 {
 	struct io_submit_state state, *statep = NULL;
 	struct io_kiocb *link = NULL;
@@ -2741,7 +2737,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
 	}
 
 	for (i = 0; i < to_submit; i++) {
-		bool force_nonblock = true;
 		struct sqe_submit s;
 
 		if (!io_get_sqring(ctx, &s))
@@ -2752,8 +2747,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
 		 * that's the end of the chain. Submit the previous link.
 		 */
 		if (!prev_was_link && link) {
-			io_queue_link_head(ctx, link, &link->submit, shadow_req,
-						force_nonblock);
+			io_queue_link_head(ctx, link, &link->submit, shadow_req);
 			link = NULL;
 			shadow_req = NULL;
 		}
@@ -2775,24 +2769,12 @@ out:
 		s.needs_lock = false;
 		s.needs_fixed_file = false;
 		submit++;
-
-		/*
-		 * The caller will block for events after submit, submit the
-		 * last IO non-blocking. This is either the only IO it's
-		 * submitting, or it already submitted the previous ones. This
-		 * improves performance by avoiding an async punt that we don't
-		 * need to do.
-		 */
-		if (block_for_last && submit == to_submit)
-			force_nonblock = false;
-
-		io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
+		io_submit_sqe(ctx, &s, statep, &link);
 	}
 	io_commit_sqring(ctx);
 
 	if (link)
-		io_queue_link_head(ctx, link, &link->submit, shadow_req,
-					!block_for_last);
+		io_queue_link_head(ctx, link, &link->submit, shadow_req);
 	if (statep)
 		io_submit_state_end(statep);
 
@@ -3636,21 +3618,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		wake_up(&ctx->sqo_wait);
 		submitted = to_submit;
 	} else if (to_submit) {
-		bool block_for_last = false;
-
 		to_submit = min(to_submit, ctx->sq_entries);
 
-		/*
-		 * Allow last submission to block in a series, IFF the caller
-		 * asked to wait for events and we don't currently have
-		 * enough. This potentially avoids an async punt.
-		 */
-		if (to_submit == min_complete &&
-		    io_cqring_events(ctx->rings) < min_complete)
-			block_for_last = true;
-
 		mutex_lock(&ctx->uring_lock);
-		submitted = io_ring_submit(ctx, to_submit, block_for_last);
+		submitted = io_ring_submit(ctx, to_submit);
 		mutex_unlock(&ctx->uring_lock);
 	}
 	if (flags & IORING_ENTER_GETEVENTS) {