io_uring/poll: get rid of io_poll_tw_hash_eject()

It serves no purpose anymore: all it does is delete the hash list
entry, and task_work always has the ring locked.
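
For readers less familiar with the locking here, a minimal userspace sketch of
the invariant this relies on. This is not the kernel implementation; the names
(task_work_run(), poll_task_func(), ring_lock, the entry list) are illustrative
stand-ins. The point it models: the task_work runner takes the ring mutex once
before invoking the callbacks, so each callback can delete its hash entry
directly instead of conditionally re-acquiring the lock itself.

/* Userspace sketch only: names and structures are illustrative, not kernel code. */
#include <pthread.h>
#include <stdio.h>

struct entry {
	struct entry *next;
	int id;
};

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *hash_head;

/* stand-in for hash_del(): caller must already hold ring_lock */
static void hash_del_entry(struct entry *e)
{
	struct entry **pp = &hash_head;

	while (*pp && *pp != e)
		pp = &(*pp)->next;
	if (*pp)
		*pp = e->next;
}

/* stand-in for io_poll_task_func(): runs with ring_lock held by the caller */
static void poll_task_func(struct entry *e)
{
	hash_del_entry(e);	/* no conditional "grab the lock if needed" helper */
}

/* stand-in for the task_work runner: locks once, then runs every callback */
static void task_work_run(struct entry **pending, int nr)
{
	pthread_mutex_lock(&ring_lock);
	for (int i = 0; i < nr; i++)
		poll_task_func(pending[i]);
	pthread_mutex_unlock(&ring_lock);
}

int main(void)
{
	struct entry a = { .id = 1 }, b = { .id = 2 };
	struct entry *pending[] = { &a, &b };

	a.next = &b;
	hash_head = &a;
	task_work_run(pending, 2);
	printf("hash empty: %s\n", hash_head == NULL ? "yes" : "no");
	return 0;
}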

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe 2024-09-30 14:35:52 -06:00
parent 085268829b
commit 879ba46a38


@@ -128,20 +128,6 @@ static void io_poll_req_insert(struct io_kiocb *req)
 	hlist_add_head(&req->hash_node, &table->hbs[index].list);
 }
 
-static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-
-	/*
-	 * ->cancel_table_locked is protected by ->uring_lock in
-	 * contrast to per bucket spinlocks. Likely, tctx_task_work()
-	 * already grabbed the mutex for us, but there is a chance it
-	 * failed.
-	 */
-	io_tw_lock(ctx, ts);
-	hash_del(&req->hash_node);
-}
-
 static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
 {
 	poll->head = NULL;
@@ -336,7 +322,8 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 		return;
 	}
 	io_poll_remove_entries(req);
-	io_poll_tw_hash_eject(req, ts);
+	/* task_work always has ->uring_lock held */
+	hash_del(&req->hash_node);
 
 	if (req->opcode == IORING_OP_POLL_ADD) {
 		if (ret == IOU_POLL_DONE) {