mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-13 22:56:27 +08:00
io_uring: remove 'sqe' parameter to the OP helpers that take it
We pass in req->sqe for all of them; since the request is always passed in, there is no need to pass the sqe separately. This is a necessary prep patch to be able to clean up/fix the request prep path. Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
b7bb4f7da0
commit
fc4df999e2
@ -1949,8 +1949,9 @@ static int io_nop(struct io_kiocb *req)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
static int io_prep_fsync(struct io_kiocb *req)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
if (!req->file)
|
||||
@ -1964,9 +1965,10 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
struct io_kiocb **nxt, bool force_nonblock)
|
||||
static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
|
||||
bool force_nonblock)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
loff_t sqe_off = READ_ONCE(sqe->off);
|
||||
loff_t sqe_len = READ_ONCE(sqe->len);
|
||||
loff_t end = sqe_off + sqe_len;
|
||||
@ -1977,7 +1979,7 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
|
||||
return -EINVAL;
|
||||
|
||||
ret = io_prep_fsync(req, sqe);
|
||||
ret = io_prep_fsync(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -1996,8 +1998,9 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
static int io_prep_sfr(struct io_kiocb *req)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
int ret = 0;
|
||||
|
||||
@ -2012,17 +2015,16 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int io_sync_file_range(struct io_kiocb *req,
|
||||
const struct io_uring_sqe *sqe,
|
||||
struct io_kiocb **nxt,
|
||||
static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
|
||||
bool force_nonblock)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
loff_t sqe_off;
|
||||
loff_t sqe_len;
|
||||
unsigned flags;
|
||||
int ret;
|
||||
|
||||
ret = io_prep_sfr(req, sqe);
|
||||
ret = io_prep_sfr(req);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2072,10 +2074,11 @@ static int io_sendmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
|
||||
#endif
|
||||
}
|
||||
|
||||
static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
struct io_kiocb **nxt, bool force_nonblock)
|
||||
static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
|
||||
bool force_nonblock)
|
||||
{
|
||||
#if defined(CONFIG_NET)
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct io_async_msghdr *kmsg = NULL;
|
||||
struct socket *sock;
|
||||
int ret;
|
||||
@ -2154,10 +2157,11 @@ static int io_recvmsg_prep(struct io_kiocb *req, struct io_async_ctx *io)
|
||||
#endif
|
||||
}
|
||||
|
||||
static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
struct io_kiocb **nxt, bool force_nonblock)
|
||||
static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
|
||||
bool force_nonblock)
|
||||
{
|
||||
#if defined(CONFIG_NET)
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct io_async_msghdr *kmsg = NULL;
|
||||
struct socket *sock;
|
||||
int ret;
|
||||
@ -2222,10 +2226,11 @@ out:
|
||||
#endif
|
||||
}
|
||||
|
||||
static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
struct io_kiocb **nxt, bool force_nonblock)
|
||||
static int io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
|
||||
bool force_nonblock)
|
||||
{
|
||||
#if defined(CONFIG_NET)
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct sockaddr __user *addr;
|
||||
int __user *addr_len;
|
||||
unsigned file_flags;
|
||||
@ -2273,10 +2278,11 @@ static int io_connect_prep(struct io_kiocb *req, struct io_async_ctx *io)
|
||||
#endif
|
||||
}
|
||||
|
||||
static int io_connect(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
struct io_kiocb **nxt, bool force_nonblock)
|
||||
static int io_connect(struct io_kiocb *req, struct io_kiocb **nxt,
|
||||
bool force_nonblock)
|
||||
{
|
||||
#if defined(CONFIG_NET)
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct io_async_ctx __io, *io;
|
||||
unsigned file_flags;
|
||||
int addr_len, ret;
|
||||
@ -2374,8 +2380,9 @@ static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
|
||||
* Find a running poll command that matches one specified in sqe->addr,
|
||||
* and remove it if found.
|
||||
*/
|
||||
static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
static int io_poll_remove(struct io_kiocb *req)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
int ret;
|
||||
|
||||
@ -2521,9 +2528,9 @@ static void io_poll_req_insert(struct io_kiocb *req)
|
||||
hlist_add_head(&req->hash_node, list);
|
||||
}
|
||||
|
||||
static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
struct io_kiocb **nxt)
|
||||
static int io_poll_add(struct io_kiocb *req, struct io_kiocb **nxt)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct io_poll_iocb *poll = &req->poll;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct io_poll_table ipt;
|
||||
@ -2660,9 +2667,9 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
|
||||
/*
|
||||
* Remove or update an existing timeout command
|
||||
*/
|
||||
static int io_timeout_remove(struct io_kiocb *req,
|
||||
const struct io_uring_sqe *sqe)
|
||||
static int io_timeout_remove(struct io_kiocb *req)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
unsigned flags;
|
||||
int ret;
|
||||
@ -2721,8 +2728,9 @@ static int io_timeout_prep(struct io_kiocb *req, struct io_async_ctx *io,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
static int io_timeout(struct io_kiocb *req)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
unsigned count;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
struct io_timeout_data *data;
|
||||
@ -2862,9 +2870,9 @@ done:
|
||||
io_put_req_find_next(req, nxt);
|
||||
}
|
||||
|
||||
static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
|
||||
struct io_kiocb **nxt)
|
||||
static int io_async_cancel(struct io_kiocb *req, struct io_kiocb **nxt)
|
||||
{
|
||||
const struct io_uring_sqe *sqe = req->sqe;
|
||||
struct io_ring_ctx *ctx = req->ctx;
|
||||
|
||||
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
|
||||
@ -2987,37 +2995,37 @@ static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
|
||||
ret = io_write(req, nxt, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_FSYNC:
|
||||
ret = io_fsync(req, req->sqe, nxt, force_nonblock);
|
||||
ret = io_fsync(req, nxt, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_POLL_ADD:
|
||||
ret = io_poll_add(req, req->sqe, nxt);
|
||||
ret = io_poll_add(req, nxt);
|
||||
break;
|
||||
case IORING_OP_POLL_REMOVE:
|
||||
ret = io_poll_remove(req, req->sqe);
|
||||
ret = io_poll_remove(req);
|
||||
break;
|
||||
case IORING_OP_SYNC_FILE_RANGE:
|
||||
ret = io_sync_file_range(req, req->sqe, nxt, force_nonblock);
|
||||
ret = io_sync_file_range(req, nxt, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_SENDMSG:
|
||||
ret = io_sendmsg(req, req->sqe, nxt, force_nonblock);
|
||||
ret = io_sendmsg(req, nxt, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_RECVMSG:
|
||||
ret = io_recvmsg(req, req->sqe, nxt, force_nonblock);
|
||||
ret = io_recvmsg(req, nxt, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_TIMEOUT:
|
||||
ret = io_timeout(req, req->sqe);
|
||||
ret = io_timeout(req);
|
||||
break;
|
||||
case IORING_OP_TIMEOUT_REMOVE:
|
||||
ret = io_timeout_remove(req, req->sqe);
|
||||
ret = io_timeout_remove(req);
|
||||
break;
|
||||
case IORING_OP_ACCEPT:
|
||||
ret = io_accept(req, req->sqe, nxt, force_nonblock);
|
||||
ret = io_accept(req, nxt, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_CONNECT:
|
||||
ret = io_connect(req, req->sqe, nxt, force_nonblock);
|
||||
ret = io_connect(req, nxt, force_nonblock);
|
||||
break;
|
||||
case IORING_OP_ASYNC_CANCEL:
|
||||
ret = io_async_cancel(req, req->sqe, nxt);
|
||||
ret = io_async_cancel(req, nxt);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
|
Loading…
Reference in New Issue
Block a user