diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a1692dad52db..0482087b7c64 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1428,25 +1428,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 		 * forever, while the workqueue is stuck trying to acquire the
 		 * very same mutex.
 		 */
-		if (wq_list_empty(&ctx->iopoll_list)) {
-			u32 tail = ctx->cached_cq_tail;
+		if (wq_list_empty(&ctx->iopoll_list) ||
+		    io_task_work_pending(ctx)) {
+			if (!llist_empty(&ctx->work_llist))
+				__io_run_local_work(ctx, true);
+			if (task_work_pending(current) ||
+			    wq_list_empty(&ctx->iopoll_list)) {
+				u32 tail = ctx->cached_cq_tail;
 
-			mutex_unlock(&ctx->uring_lock);
-			ret = io_run_task_work_ctx(ctx);
-			mutex_lock(&ctx->uring_lock);
-			if (ret < 0)
-				break;
+				mutex_unlock(&ctx->uring_lock);
+				ret = io_run_task_work();
+				mutex_lock(&ctx->uring_lock);
 
-			/* some requests don't go through iopoll_list */
-			if (tail != ctx->cached_cq_tail ||
-			    wq_list_empty(&ctx->iopoll_list))
-				break;
-		}
+				if (ret < 0)
+					break;
 
-		if (task_work_pending(current)) {
-			mutex_unlock(&ctx->uring_lock);
-			io_run_task_work();
-			mutex_lock(&ctx->uring_lock);
+				/* some requests don't go through iopoll_list */
+				if (tail != ctx->cached_cq_tail ||
+				    wq_list_empty(&ctx->iopoll_list))
+					break;
+			}
 		}
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 0f90d1dfa42b..9d89425292b7 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -236,6 +236,12 @@ static inline int io_run_task_work(void)
 	return 0;
 }
 
+static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+{
+	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
+	       !wq_list_empty(&ctx->work_llist);
+}
+
 static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
 {
 	int ret = 0;