io_uring: mitigate unlikely iopoll lag
We have requests like IORING_OP_FILES_UPDATE that don't go through ->iopoll_list but get completed in place under ->uring_lock, so after dropping the lock io_iopoll_check() should expect that some CQEs might have been completed in the meantime. Currently such events won't be accounted for in @nr_events, and the loop will continue to poll even if there are already enough CQEs. It shouldn't be much of a problem as it's unlikely to happen, but it's not nice either. Just return earlier in this case; it should be enough.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/66ef932cc66a34e3771bbae04b2953a8058e9d05.1625747741.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c32aace0cf
commit 8f487ef2cb
@@ -2356,11 +2356,15 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 		 * very same mutex.
 		 */
 		if (list_empty(&ctx->iopoll_list)) {
+			u32 tail = ctx->cached_cq_tail;
+
 			mutex_unlock(&ctx->uring_lock);
 			io_run_task_work();
 			mutex_lock(&ctx->uring_lock);
 
-			if (list_empty(&ctx->iopoll_list))
+			/* some requests don't go through iopoll_list */
+			if (tail != ctx->cached_cq_tail ||
+			    list_empty(&ctx->iopoll_list))
 				break;
 		}
 		ret = io_do_iopoll(ctx, &nr_events, min);
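For context, below is a minimal sketch of how the patched check sits inside the io_iopoll_check() polling loop. This is a simplified reconstruction based on the kernel around this commit, not the verbatim function; the surrounding comment block, setup, and error handling are elided.

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	int ret = 0;

	/* Polling and inline completions are serialized by ->uring_lock. */
	mutex_lock(&ctx->uring_lock);
	do {
		if (list_empty(&ctx->iopoll_list)) {
			/* Snapshot the CQ tail before giving up the lock. */
			u32 tail = ctx->cached_cq_tail;

			mutex_unlock(&ctx->uring_lock);
			io_run_task_work();
			mutex_lock(&ctx->uring_lock);

			/*
			 * A changed tail means requests that bypass
			 * ->iopoll_list (e.g. IORING_OP_FILES_UPDATE)
			 * completed inline while the lock was dropped,
			 * so stop early instead of polling for more.
			 */
			if (tail != ctx->cached_cq_tail ||
			    list_empty(&ctx->iopoll_list))
				break;
		}
		ret = io_do_iopoll(ctx, &nr_events, min);
	} while (!ret && nr_events < min && !need_resched());
	mutex_unlock(&ctx->uring_lock);

	return ret;
}

Since ctx->cached_cq_tail only ever advances as CQEs are posted, comparing a snapshot taken before the unlock against its value after relocking is a cheap way to detect that completions happened in the window, without walking the CQ ring itself.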