io_uring: fix -EAGAIN retry with IOPOLL
We no longer revert the iovec on -EIOCBQUEUED, see commit ab2125df92,
and this started causing issues for IOPOLL on devices that run out of
request slots. Turns out that outside of needing a revert for those, we
also had a bug where we didn't properly set up the retry inside the
submission path. That could cause re-import of the iovec, if any, and
that could lead to spurious results if the application had those
allocated on the stack.

Catch the -EAGAIN retry and make the iovec stable for IOPOLL, just like
we do for !IOPOLL retries.
Cc: <stable@vger.kernel.org> # 5.9+
Reported-by: Abaci Robot <abaci@linux.alibaba.com>
Reported-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dc7bbc9ef3
commit 3e6a0d3c75
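For context, a minimal userspace sketch of the pattern this fix protects
against (illustrative only, not part of the commit; assumes liburing, and
the device path, block size, and queue depth are placeholders). The iovec
lives on the stack of a helper that returns before the completion arrives,
so the kernel must keep its own stable copy rather than re-importing it on
an IOPOLL -EAGAIN retry:

	/*
	 * Illustrative only, not part of the commit. Assumes liburing;
	 * /dev/nvme0n1, the 4k size, and the queue depth are placeholders.
	 * Build: gcc -O2 demo.c -o demo -luring
	 */
	#define _GNU_SOURCE
	#include <liburing.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void queue_read(struct io_uring *ring, int fd, void *buf)
	{
		/* The iovec lives on this function's stack and is dead once
		 * we return, while the request may still be in flight. The
		 * kernel imports (copies) it at submission time, so an
		 * -EAGAIN retry must reuse that stable copy instead of
		 * re-importing from here. */
		struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		io_uring_prep_readv(sqe, fd, &iov, 1, 0);
		io_uring_submit(ring);
	}

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_cqe *cqe;
		void *buf;
		int fd;

		/* IOPOLL needs O_DIRECT, which needs an aligned buffer */
		fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
			return 1;
		if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL) < 0)
			return 1;

		queue_read(&ring, fd, buf);
		if (io_uring_wait_cqe(&ring, &cqe) == 0) {
			printf("read: %d\n", cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		return 0;
	}

If the kernel re-imported the iovec after queue_read() has returned, it
would read a dead stack slot — exactly the "spurious results" case the
commit message describes.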
diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2423,23 +2423,32 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 		return false;
 	return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
 }
 #endif
 
-static bool io_rw_reissue(struct io_kiocb *req)
+static bool io_rw_should_reissue(struct io_kiocb *req)
 {
 #ifdef CONFIG_BLOCK
 	umode_t mode = file_inode(req->file)->i_mode;
+	struct io_ring_ctx *ctx = req->ctx;
 
 	if (!S_ISBLK(mode) && !S_ISREG(mode))
 		return false;
-	if ((req->flags & REQ_F_NOWAIT) || io_wq_current_is_worker())
+	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
+	    !(ctx->flags & IORING_SETUP_IOPOLL)))
 		return false;
 	/*
 	 * If ref is dying, we might be running poll reap from the exit work.
 	 * Don't attempt to reissue from that path, just let it fail with
 	 * -EAGAIN.
 	 */
-	if (percpu_ref_is_dying(&req->ctx->refs))
+	if (percpu_ref_is_dying(&ctx->refs))
+		return false;
+	return true;
+}
+#endif
+
+static bool io_rw_reissue(struct io_kiocb *req)
+{
+#ifdef CONFIG_BLOCK
+	if (!io_rw_should_reissue(req))
 		return false;
 
 	lockdep_assert_held(&req->ctx->uring_lock);
@@ -2482,6 +2491,19 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
+#ifdef CONFIG_BLOCK
+	/* Rewind iter, if we have one. iopoll path resubmits as usual */
+	if (res == -EAGAIN && io_rw_should_reissue(req)) {
+		struct io_async_rw *rw = req->async_data;
+
+		if (rw)
+			iov_iter_revert(&rw->iter,
+					req->result - iov_iter_count(&rw->iter));
+		else if (!io_resubmit_prep(req))
+			res = -EIO;
+	}
+#endif
+
 	if (kiocb->ki_flags & IOCB_WRITE)
 		kiocb_end_write(req);
 
@@ -3230,6 +3252,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_iter_do_read(req, iter);
 
 	if (ret == -EIOCBQUEUED) {
+		if (req->async_data)
+			iov_iter_revert(iter, io_size - iov_iter_count(iter));
 		goto out_free;
 	} else if (ret == -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
@@ -3361,6 +3385,8 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	/* no retry on NONBLOCK nor RWF_NOWAIT */
 	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
 		goto done;
+	if (ret2 == -EIOCBQUEUED && req->async_data)
+		iov_iter_revert(iter, io_size - iov_iter_count(iter));
 	if (!force_nonblock || ret2 != -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
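As a reading aid for the iov_iter_revert() arithmetic used above, a toy
userspace analogue (not the kernel's iov_iter, which tracks segments,
offsets, and type, not just a byte count): iov_iter_count() reports bytes
remaining, so io_size minus the remaining count is exactly the number of
bytes consumed, and reverting by that amount restores the iterator for
the retry.

	/*
	 * Toy analogue of the rewind arithmetic; hypothetical types,
	 * not the kernel's iov_iter.
	 */
	#include <assert.h>
	#include <stddef.h>

	struct toy_iter {
		size_t count;	/* bytes remaining, like iov_iter_count() */
	};

	static void toy_advance(struct toy_iter *i, size_t n) { i->count -= n; }
	static void toy_revert(struct toy_iter *i, size_t n)  { i->count += n; }

	int main(void)
	{
		size_t io_size = 4096;
		struct toy_iter iter = { .count = io_size };

		toy_advance(&iter, 1024);		  /* partial progress, then -EAGAIN */
		toy_revert(&iter, io_size - iter.count);  /* same expression as the patch */
		assert(iter.count == io_size);		  /* iterator restored for retry */
		return 0;
	}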