io_uring: get rid of kiocb_wait_page_queue_init()

The 5.9 merge moved this function into io_uring, which means that we don't
need to retain the generic nature of it. Clean up this part by removing
redundant checks, and just inlining the small remainder in
io_rw_should_retry().

No functional changes in this patch.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Jens Axboe 2020-08-16 10:58:43 -07:00
parent b711d4eaf0
commit 3b2a4439e0

View File

@@ -3074,27 +3074,6 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
return 1; return 1;
} }
static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
struct wait_page_queue *wait,
wait_queue_func_t func,
void *data)
{
/* Can't support async wakeup with polled IO */
if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL;
if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) {
wait->wait.func = func;
wait->wait.private = data;
wait->wait.flags = 0;
INIT_LIST_HEAD(&wait->wait.entry);
kiocb->ki_flags |= IOCB_WAITQ;
kiocb->ki_waitq = wait;
return 0;
}
return -EOPNOTSUPP;
}
/* /*
* This controls whether a given IO request should be armed for async page * This controls whether a given IO request should be armed for async page
* based retry. If we return false here, the request is handed to the async * based retry. If we return false here, the request is handed to the async
@@ -3109,16 +3088,17 @@ static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
*/ */
static bool io_rw_should_retry(struct io_kiocb *req) static bool io_rw_should_retry(struct io_kiocb *req)
{ {
struct wait_page_queue *wait = &req->io->rw.wpq;
struct kiocb *kiocb = &req->rw.kiocb; struct kiocb *kiocb = &req->rw.kiocb;
int ret;
/* never retry for NOWAIT, we just complete with -EAGAIN */ /* never retry for NOWAIT, we just complete with -EAGAIN */
if (req->flags & REQ_F_NOWAIT) if (req->flags & REQ_F_NOWAIT)
return false; return false;
/* Only for buffered IO */ /* Only for buffered IO */
if (kiocb->ki_flags & IOCB_DIRECT) if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
return false; return false;
/* /*
* just use poll if we can, and don't attempt if the fs doesn't * just use poll if we can, and don't attempt if the fs doesn't
* support callback based unlocks * support callback based unlocks
@@ -3126,14 +3106,15 @@ static bool io_rw_should_retry(struct io_kiocb *req)
if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
return false; return false;
ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq, wait->wait.func = io_async_buf_func;
io_async_buf_func, req); wait->wait.private = req;
if (!ret) { wait->wait.flags = 0;
INIT_LIST_HEAD(&wait->wait.entry);
kiocb->ki_flags |= IOCB_WAITQ;
kiocb->ki_waitq = wait;
io_get_req_task(req); io_get_req_task(req);
return true; return true;
}
return false;
} }
static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)