io_uring: fix race between timeout flush and removal
io_flush_timeouts() assumes the timeout isn't in progress of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.

Leave it on the list and let the normal timeout cancelation take care
of it.

Cc: stable@vger.kernel.org # 5.5+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit e677edbcab
parent 4cdd158be9
@@ -1736,12 +1736,11 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->completion_lock)
 {
 	u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+	struct io_kiocb *req, *tmp;
 
 	spin_lock_irq(&ctx->timeout_lock);
-	while (!list_empty(&ctx->timeout_list)) {
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
 		u32 events_needed, events_got;
-		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
@@ -1758,7 +1757,6 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 		if (events_got < events_needed)
 			break;
 
-		list_del_init(&req->timeout.list);
 		io_kill_timeout(req, 0);
 	}
 	ctx->cq_last_tm_flush = seq;
@@ -6628,6 +6626,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
 		return -EINVAL;
 
+	INIT_LIST_HEAD(&req->timeout.list);
 	data->mode = io_translate_timeout_mode(flags);
 	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
 
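For readers outside the kernel tree, below is a minimal userspace sketch of the list pattern the fix relies on. The helpers mirror <linux/list.h> in name only and are reimplemented here; unlike the kernel macro, this list_for_each_entry_safe() takes an explicit type argument instead of using typeof(). struct timeout_req, kill_timeout() and the seq comparison are hypothetical stand-ins for struct io_kiocb, io_kill_timeout() and the CQ sequence check, not the actual io_uring code.

/* Userspace sketch of the "leave it on the list, let one path unlink it"
 * pattern.  NOT kernel code: list helpers are reimplemented minimally. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

/* Unlink and re-point the node at itself, so a repeated call is a no-op. */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Safe variant: 'n' caches the next node, so the body may unlink 'pos'. */
#define list_for_each_entry_safe(pos, n, head, type, member)		\
	for (pos = container_of((head)->next, type, member),		\
	     n = container_of(pos->member.next, type, member);		\
	     &pos->member != (head);					\
	     pos = n, n = container_of(n->member.next, type, member))

/* Stand-in for the timeout bookkeeping inside struct io_kiocb. */
struct timeout_req {
	unsigned int seq;
	struct list_head list;
};

/* Stand-in for io_kill_timeout(): the one place that unlinks an entry. */
static void kill_timeout(struct timeout_req *req)
{
	list_del_init(&req->list);
	printf("canceled timeout seq=%u\n", req->seq);
}

int main(void)
{
	struct list_head timeout_list = LIST_HEAD_INIT(timeout_list);
	struct timeout_req reqs[3] = { { .seq = 1 }, { .seq = 2 }, { .seq = 3 } };
	struct timeout_req *req, *tmp;

	for (int i = 0; i < 3; i++) {
		INIT_LIST_HEAD(&reqs[i].list);	/* as io_timeout_prep() now does */
		list_add_tail(&reqs[i].list, &timeout_list);
	}

	/* Flush expired entries; removal itself is left to kill_timeout(). */
	list_for_each_entry_safe(req, tmp, &timeout_list, struct timeout_req, list) {
		if (req->seq > 2)
			break;			/* not expired yet */
		kill_timeout(req);
	}

	/* Unlinking an already-flushed entry again is harmless. */
	list_del_init(&reqs[0].list);
	return 0;
}

The property the sketch exercises is that list_del_init() leaves a node pointing at itself, so whichever path tries to unlink an entry second performs a harmless no-op rather than corrupting the list; initializing the list head at prep time gives the same guarantee to a request that never made it onto the timeout list.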