io_uring: propagate issue_flags state down to file assignment

We'll need this in a future patch, when we could be assigning the file
after the prep stage. While at it, get rid of the io_file_get() helper,
as it just makes the code harder to read.

Signed-off-by: Jens Axboe <axboe@kernel.dk>

Author: Jens Axboe
Date:   2022-04-04 17:18:43 -06:00
parent 584b0180f0
commit 5106dd6e74
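
The diff below threads the callers' issue_flags down into io_req_set_rsrc_node()
and the fixed-file lookup, so those helpers know whether ->uring_lock is already
held: when it is, they keep using the lock-protected cached refs; when
IO_URING_F_UNLOCKED is set, they take the lock (or grab a percpu reference)
themselves. As a rough illustration of that locking pattern only, not the kernel
code itself, here is a minimal userspace sketch with made-up names (struct
my_ctx, ctx_lookup_fixed(), MY_F_UNLOCKED) and a pthread mutex standing in for
->uring_lock:

/*
 * Illustrative sketch only, not kernel code.  A per-context lock
 * (standing in for ->uring_lock) protects a fixed table; the lookup
 * helper is told via flags whether the caller already holds the lock,
 * mirroring how issue_flags & IO_URING_F_UNLOCKED is used in the patch.
 */
#include <pthread.h>
#include <stdio.h>

#define MY_F_UNLOCKED	(1U << 0)	/* caller does NOT hold ctx->lock */

struct my_ctx {
	pthread_mutex_t lock;		/* plays the role of ->uring_lock */
	int files[4];			/* stand-in for the fixed file table */
	int nr_files;
};

/* Look up a fixed slot; take the lock only if the caller doesn't hold it. */
static int ctx_lookup_fixed(struct my_ctx *ctx, int slot, unsigned int flags)
{
	int ret = -1;

	if (flags & MY_F_UNLOCKED)
		pthread_mutex_lock(&ctx->lock);

	if (slot >= 0 && slot < ctx->nr_files)
		ret = ctx->files[slot];

	if (flags & MY_F_UNLOCKED)
		pthread_mutex_unlock(&ctx->lock);
	return ret;
}

int main(void)
{
	struct my_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.files = { 10, 11, 12, 13 },
		.nr_files = 4,
	};

	/* Submission-style path: lock already held, so pass flags of 0. */
	pthread_mutex_lock(&ctx.lock);
	printf("locked caller   -> %d\n", ctx_lookup_fixed(&ctx, 1, 0));
	pthread_mutex_unlock(&ctx.lock);

	/* Out-of-line path: the helper must lock for itself. */
	printf("unlocked caller -> %d\n", ctx_lookup_fixed(&ctx, 2, MY_F_UNLOCKED));
	return 0;
}

The io_file_get_fixed() rework in the hunk at old line 7460 has the same shape:
callers that already hold ->uring_lock pass issue_flags of 0, while the io_tee()
and io_splice() paths pass IO_URING_F_UNLOCKED and let the helper lock for
itself.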

@@ -1183,8 +1183,9 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
 					  struct io_uring_rsrc_update2 *up,
 					  unsigned nr_args);
 static void io_clean_op(struct io_kiocb *req);
-static struct file *io_file_get(struct io_ring_ctx *ctx,
-				struct io_kiocb *req, int fd, bool fixed);
+static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+					     unsigned issue_flags);
+static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
 static void __io_queue_sqe(struct io_kiocb *req);
 
 static void io_rsrc_put_work(struct work_struct *work);
@@ -1314,13 +1315,20 @@ static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
 }
 
 static inline void io_req_set_rsrc_node(struct io_kiocb *req,
-					struct io_ring_ctx *ctx)
+					struct io_ring_ctx *ctx,
+					unsigned int issue_flags)
 {
 	if (!req->fixed_rsrc_refs) {
 		req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
-		ctx->rsrc_cached_refs--;
-		if (unlikely(ctx->rsrc_cached_refs < 0))
-			io_rsrc_refs_refill(ctx);
+
+		if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+			lockdep_assert_held(&ctx->uring_lock);
+			ctx->rsrc_cached_refs--;
+			if (unlikely(ctx->rsrc_cached_refs < 0))
+				io_rsrc_refs_refill(ctx);
+		} else {
+			percpu_ref_get(req->fixed_rsrc_refs);
+		}
 	}
 }
 
@@ -3330,7 +3338,8 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
 	return 0;
 }
 
-static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
+			   unsigned int issue_flags)
 {
 	struct io_mapped_ubuf *imu = req->imu;
 	u16 index, buf_index = req->buf_index;
@@ -3340,7 +3349,7 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
 
 		if (unlikely(buf_index >= ctx->nr_user_bufs))
 			return -EFAULT;
-		io_req_set_rsrc_node(req, ctx);
+		io_req_set_rsrc_node(req, ctx, issue_flags);
 		index = array_index_nospec(buf_index, ctx->nr_user_bufs);
 		imu = READ_ONCE(ctx->user_bufs[index]);
 		req->imu = imu;
@@ -3502,7 +3511,7 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
 	ssize_t ret;
 
 	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
-		ret = io_import_fixed(req, rw, iter);
+		ret = io_import_fixed(req, rw, iter, issue_flags);
 		if (ret)
 			return ERR_PTR(ret);
 		return NULL;
@@ -4394,8 +4403,10 @@ static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
 	if (issue_flags & IO_URING_F_NONBLOCK)
 		return -EAGAIN;
 
-	in = io_file_get(req->ctx, req, sp->splice_fd_in,
-			 (sp->flags & SPLICE_F_FD_IN_FIXED));
+	if (sp->flags & SPLICE_F_FD_IN_FIXED)
+		in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
+	else
+		in = io_file_get_normal(req, sp->splice_fd_in);
 	if (!in) {
 		ret = -EBADF;
 		goto done;
@@ -4434,8 +4445,10 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 	if (issue_flags & IO_URING_F_NONBLOCK)
 		return -EAGAIN;
 
-	in = io_file_get(req->ctx, req, sp->splice_fd_in,
-			 (sp->flags & SPLICE_F_FD_IN_FIXED));
+	if (sp->flags & SPLICE_F_FD_IN_FIXED)
+		in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
+	else
+		in = io_file_get_normal(req, sp->splice_fd_in);
 	if (!in) {
 		ret = -EBADF;
 		goto done;
@@ -5973,7 +5986,7 @@ static void io_poll_remove_entries(struct io_kiocb *req)
  * either spurious wakeup or multishot CQE is served. 0 when it's done with
  * the request, then the mask is stored in req->result.
  */
-static int io_poll_check_events(struct io_kiocb *req)
+static int io_poll_check_events(struct io_kiocb *req, bool locked)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_poll_iocb *poll = io_poll_get_single(req);
@@ -6030,7 +6043,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = io_poll_check_events(req);
+	ret = io_poll_check_events(req, *locked);
 	if (ret > 0)
 		return;
 
@@ -6055,7 +6068,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	ret = io_poll_check_events(req);
+	ret = io_poll_check_events(req, *locked);
 	if (ret > 0)
 		return;
 
@@ -7460,30 +7473,36 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file
 	file_slot->file_ptr = file_ptr;
 }
 
-static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
-					     struct io_kiocb *req, int fd)
+static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+					     unsigned int issue_flags)
 {
-	struct file *file;
+	struct io_ring_ctx *ctx = req->ctx;
+	struct file *file = NULL;
 	unsigned long file_ptr;
 
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_lock(&ctx->uring_lock);
+
 	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
-		return NULL;
+		goto out;
 	fd = array_index_nospec(fd, ctx->nr_user_files);
 	file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
 	file = (struct file *) (file_ptr & FFS_MASK);
 	file_ptr &= ~FFS_MASK;
 	/* mask in overlapping REQ_F and FFS bits */
 	req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
-	io_req_set_rsrc_node(req, ctx);
+	io_req_set_rsrc_node(req, ctx, 0);
+out:
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_unlock(&ctx->uring_lock);
 	return file;
 }
 
-static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
-				       struct io_kiocb *req, int fd)
+static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 {
 	struct file *file = fget(fd);
 
-	trace_io_uring_file_get(ctx, req, req->user_data, fd);
+	trace_io_uring_file_get(req->ctx, req, req->user_data, fd);
 
 	/* we don't allow fixed io_uring files */
 	if (file && unlikely(file->f_op == &io_uring_fops))
@@ -7491,15 +7510,6 @@ static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
 	return file;
 }
 
-static inline struct file *io_file_get(struct io_ring_ctx *ctx,
-				       struct io_kiocb *req, int fd, bool fixed)
-{
-	if (fixed)
-		return io_file_get_fixed(ctx, req, fd);
-	else
-		return io_file_get_normal(ctx, req, fd);
-}
-
 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 {
 	struct io_kiocb *prev = req->timeout.prev;
@@ -7749,8 +7759,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 			blk_start_plug_nr_ios(&state->plug, state->submit_nr);
 		}
 
-		req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
-					(sqe_flags & IOSQE_FIXED_FILE));
+		if (req->flags & REQ_F_FIXED_FILE)
+			req->file = io_file_get_fixed(req, READ_ONCE(sqe->fd), 0);
+		else
+			req->file = io_file_get_normal(req, READ_ONCE(sqe->fd));
 		if (unlikely(!req->file))
 			return -EBADF;
 	}