io_uring/net: switch io_recv() to using io_async_msghdr

No functional changes in this patch, just in preparation for carrying
more state than what is available now, if necessary.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Jens Axboe <axboe@kernel.dk>
Date:   2024-03-05 15:39:16 -07:00
parent 54cdcca05a
commit 4a3223f7bf

3 changed files with 53 additions and 31 deletions
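The heart of the change is the pattern io_recvmsg() already uses: one io_async_msghdr holds all of the receive state, borrowed from req->async_data when a previous attempt persisted it, or initialized on the stack for the common inline-completion path. Below is a minimal userspace sketch of that idea, not kernel code; every name in it (fake_req, async_state, setup_async) is invented for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct io_async_msghdr: everything a retry must see. */
struct async_state {
	char buf[64];
	size_t done;		/* progress carried across attempts */
};

/* Stand-in for struct io_kiocb with its async_data pointer. */
struct fake_req {
	struct async_state *async_data;
};

/* Analogue of io_setup_async_msg(): persist on-stack state to the heap. */
static int setup_async(struct fake_req *req, struct async_state *st)
{
	if (!req->async_data) {
		req->async_data = malloc(sizeof(*st));
		if (!req->async_data)
			return -ENOMEM;
		memcpy(req->async_data, st, sizeof(*st));
	}
	return -EAGAIN;	/* tell the caller to retry later */
}

static int issue(struct fake_req *req, bool would_block)
{
	struct async_state stack_state, *st;

	/* Analogue of req_has_async_data(): reuse persisted state if any. */
	if (req->async_data) {
		st = req->async_data;
	} else {
		st = &stack_state;
		memset(st, 0, sizeof(*st));
	}

	if (would_block)
		return setup_async(req, st);	/* was a bare -EAGAIN before */

	st->done += (size_t)snprintf(st->buf, sizeof(st->buf), "payload");
	printf("done=%zu (state was %s)\n", st->done,
	       req->async_data ? "persisted" : "on-stack");
	return 0;
}

int main(void)
{
	struct fake_req req = { .async_data = NULL };

	if (issue(&req, true) == -EAGAIN)	/* first attempt blocks */
		issue(&req, false);		/* retry sees the same state */
	free(req.async_data);
	return 0;
}

This is the design point the diff carries through io_recv(): a blocking attempt no longer returns a bare -EAGAIN, it first parks its state via io_setup_async_msg(), so partial progress such as sr->done_io survives the retry.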

diff --git a/io_uring/net.c b/io_uring/net.c
--- a/io_uring/net.c
+++ b/io_uring/net.c

@@ -320,7 +320,7 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 	return ret;
 }
 
-int io_send_prep_async(struct io_kiocb *req)
+int io_sendrecv_prep_async(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_async_msghdr *io;
@@ -703,13 +703,13 @@ static inline void io_recv_prep_retry(struct io_kiocb *req)
  * again (for multishot).
  */
 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
-				  struct msghdr *msg, bool mshot_finished,
-				  unsigned issue_flags)
+				  struct io_async_msghdr *kmsg,
+				  bool mshot_finished, unsigned issue_flags)
 {
 	unsigned int cflags;
 
 	cflags = io_put_kbuf(req, issue_flags);
-	if (msg->msg_inq > 0)
+	if (kmsg->msg.msg_inq > 0)
 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
 
 	/*
@@ -723,7 +723,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 
 		io_recv_prep_retry(req);
 		/* Known not-empty or unknown state, retry */
-		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq < 0) {
+		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
 			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
 				return false;
 			/* mshot retries exceeded, force a requeue */
@@ -924,7 +924,7 @@ retry_multishot:
 	else
 		io_kbuf_recycle(req, issue_flags);
 
-	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
+	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
 		goto retry_multishot;
 
 	if (mshot_finished)
@@ -938,29 +938,42 @@ retry_multishot:
 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct msghdr msg;
+	struct io_async_msghdr iomsg, *kmsg;
 	struct socket *sock;
 	unsigned flags;
 	int ret, min_ret = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 	size_t len = sr->len;
 
+	if (req_has_async_data(req)) {
+		kmsg = req->async_data;
+	} else {
+		kmsg = &iomsg;
+		kmsg->free_iov = NULL;
+		kmsg->msg.msg_name = NULL;
+		kmsg->msg.msg_namelen = 0;
+		kmsg->msg.msg_control = NULL;
+		kmsg->msg.msg_get_inq = 1;
+		kmsg->msg.msg_controllen = 0;
+		kmsg->msg.msg_iocb = NULL;
+		kmsg->msg.msg_ubuf = NULL;
+
+		if (!io_do_buffer_select(req)) {
+			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+					  &kmsg->msg.msg_iter);
+			if (unlikely(ret))
+				return ret;
+		}
+	}
+
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
-		return -EAGAIN;
+		return io_setup_async_msg(req, kmsg, issue_flags);
 
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
 
-	msg.msg_name = NULL;
-	msg.msg_namelen = 0;
-	msg.msg_control = NULL;
-	msg.msg_get_inq = 1;
-	msg.msg_controllen = 0;
-	msg.msg_iocb = NULL;
-	msg.msg_ubuf = NULL;
-
 	flags = sr->msg_flags;
 	if (force_nonblock)
 		flags |= MSG_DONTWAIT;
@@ -974,22 +987,23 @@ retry_multishot:
 			return -ENOBUFS;
 		sr->buf = buf;
 		sr->len = len;
-	}
-
-	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
-	if (unlikely(ret))
-		goto out_free;
+		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+				  &kmsg->msg.msg_iter);
+		if (unlikely(ret))
+			goto out_free;
+	}
 
-	msg.msg_inq = -1;
-	msg.msg_flags = 0;
+	kmsg->msg.msg_inq = -1;
+	kmsg->msg.msg_flags = 0;
 
 	if (flags & MSG_WAITALL)
-		min_ret = iov_iter_count(&msg.msg_iter);
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
-	ret = sock_recvmsg(sock, &msg, flags);
+	ret = sock_recvmsg(sock, &kmsg->msg, flags);
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && force_nonblock) {
-			if (issue_flags & IO_URING_F_MULTISHOT) {
+			ret = io_setup_async_msg(req, kmsg, issue_flags);
+			if (ret == -EAGAIN && issue_flags & IO_URING_F_MULTISHOT) {
 				io_kbuf_recycle(req, issue_flags);
 				return IOU_ISSUE_SKIP_COMPLETE;
 			}
@@ -1001,12 +1015,12 @@ retry_multishot:
 			sr->buf += ret;
 			sr->done_io += ret;
 			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return io_setup_async_msg(req, kmsg, issue_flags);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 		req_set_fail(req);
-	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
 out_free:
 		req_set_fail(req);
 	}
@@ -1018,9 +1032,14 @@ out_free:
 	else
 		io_kbuf_recycle(req, issue_flags);
 
-	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
+	if (!io_recv_finish(req, &ret, kmsg, ret <= 0, issue_flags))
 		goto retry_multishot;
 
+	if (ret == -EAGAIN)
+		return io_setup_async_msg(req, kmsg, issue_flags);
+	else if (ret != IOU_OK && ret != IOU_STOP_MULTISHOT)
+		io_req_msg_cleanup(req, kmsg, issue_flags);
+
 	return ret;
 }

diff --git a/io_uring/net.h b/io_uring/net.h
--- a/io_uring/net.h
+++ b/io_uring/net.h

@@ -40,7 +40,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
 
 int io_send(struct io_kiocb *req, unsigned int issue_flags);
-int io_send_prep_async(struct io_kiocb *req);
+int io_sendrecv_prep_async(struct io_kiocb *req);
 
 int io_recvmsg_prep_async(struct io_kiocb *req);
 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

diff --git a/io_uring/opdef.c b/io_uring/opdef.c
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c

@@ -604,13 +604,16 @@ const struct io_cold_def io_cold_defs[] = {
 		.async_size		= sizeof(struct io_async_msghdr),
 		.cleanup		= io_sendmsg_recvmsg_cleanup,
 		.fail			= io_sendrecv_fail,
-		.prep_async		= io_send_prep_async,
+		.prep_async		= io_sendrecv_prep_async,
 #endif
 	},
 	[IORING_OP_RECV] = {
 		.name			= "RECV",
 #if defined(CONFIG_NET)
 		.async_size		= sizeof(struct io_async_msghdr),
+		.cleanup		= io_sendmsg_recvmsg_cleanup,
+		.fail			= io_sendrecv_fail,
+		.prep_async		= io_sendrecv_prep_async,
 #endif
 	},
 	[IORING_OP_OPENAT2] = {
@@ -687,7 +690,7 @@ const struct io_cold_def io_cold_defs[] = {
 		.name			= "SEND_ZC",
 #if defined(CONFIG_NET)
 		.async_size		= sizeof(struct io_async_msghdr),
-		.prep_async		= io_send_prep_async,
+		.prep_async		= io_sendrecv_prep_async,
 		.cleanup		= io_send_zc_cleanup,
 		.fail			= io_sendrecv_fail,
 #endif