io_uring: add local task_work run helper that is entered locked
We have a few spots that drop the mutex just to run local task_work, which immediately tries to grab it again. Add a helper that just passes in whether we're locked already.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 8ac5d85a89
parent a1119fb071
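For context, a minimal sketch of the calling pattern the new helper enables. The caller name run_local_work_locked below is illustrative and not part of this patch: a path that already holds ctx->uring_lock can pass its lock state into __io_run_local_work() instead of dropping the mutex just so io_run_local_work() can trylock it again.

/*
 * Illustrative sketch, not from this patch: a caller that already
 * holds ctx->uring_lock runs pending local task_work by passing
 * locked == true, avoiding an unlock/trylock round trip.
 */
static int run_local_work_locked(struct io_ring_ctx *ctx)
{
	/* caller owns ctx->uring_lock here */
	return __io_run_local_work(ctx, true);
}

Callers that do not hold the lock keep using io_run_local_work(), which still does the mutex_trylock()/mutex_unlock() dance around the same helper.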
io_uring/io_uring.c
@@ -1161,9 +1161,8 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 	}
 }
 
-int io_run_local_work(struct io_ring_ctx *ctx)
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked)
 {
-	bool locked;
 	struct llist_node *node;
 	struct llist_node fake;
 	struct llist_node *current_final = NULL;
@@ -1178,8 +1177,6 @@ int io_run_local_work(struct io_ring_ctx *ctx)
 		return -EEXIST;
 	}
 
-	locked = mutex_trylock(&ctx->uring_lock);
-
 	node = io_llist_xchg(&ctx->work_llist, &fake);
 	ret = 0;
 again:
@@ -1204,12 +1201,24 @@ again:
 		goto again;
 	}
 
-	if (locked) {
+	if (locked)
 		io_submit_flush_completions(ctx);
-		mutex_unlock(&ctx->uring_lock);
-	}
 	trace_io_uring_local_work_run(ctx, ret, loops);
 	return ret;
+
+}
+
+int io_run_local_work(struct io_ring_ctx *ctx)
+{
+	bool locked;
+	int ret;
+
+	locked = mutex_trylock(&ctx->uring_lock);
+	ret = __io_run_local_work(ctx, locked);
+	if (locked)
+		mutex_unlock(&ctx->uring_lock);
+
+	return ret;
 }
 
 static void io_req_tw_post(struct io_kiocb *req, bool *locked)

io_uring/io_uring.h
@@ -27,6 +27,7 @@ enum {
 struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
 bool io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
 int io_run_local_work(struct io_ring_ctx *ctx);
 void io_req_complete_failed(struct io_kiocb *req, s32 res);
 void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);