b48c312be0
Following user feedback, this patch simplifies the zerocopy send API. One of the main complaints is that the current API is difficult to use, with userspace managing notification slots, and send retries with error handling make it even worse.

Instead of keeping notification slots, change it to a per-request notification model, which posts both a completion and a notification CQE for each request when any data has been sent, and only one CQE if it fails. All notification CQEs will have IORING_CQE_F_NOTIF set, and IORING_CQE_F_MORE in the completion CQE indicates whether to wait for a notification or not. IOSQE_CQE_SKIP_SUCCESS is disallowed with zerocopy sends for now.

This is less flexible, but greatly simplifies the user API and also the kernel implementation. We reuse notif helpers in this patch, but in the future there won't be a need for keeping two requests.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/95287640ab98fc9417370afb16e310677c63e6ce.1662027856.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
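To make the two-CQE flow concrete, here is a minimal userspace sketch (not part of the patch) of one zerocopy send under the model described above. It assumes liburing's io_uring_prep_send_zc() helper (available in liburing 2.3+; helper and opcode naming were still in flux around this kernel release), a connected `sockfd`, and a caller-owned buffer; error handling is abbreviated.

/*
 * Hedged sketch, not kernel code from this patch: issue one zerocopy
 * send and reap both its completion and its notification CQE.
 */
#include <errno.h>
#include <liburing.h>

static int send_zc_once(struct io_uring *ring, int sockfd,
			const void *buf, size_t len)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	unsigned more;
	int res;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	/* note: IOSQE_CQE_SKIP_SUCCESS is disallowed for zerocopy sends */
	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	io_uring_submit(ring);

	/* first CQE: the completion, carrying the usual send() style result */
	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	res = cqe->res;
	more = cqe->flags & IORING_CQE_F_MORE;
	io_uring_cqe_seen(ring, cqe);

	/*
	 * IORING_CQE_F_MORE means data went out and a second CQE flagged
	 * IORING_CQE_F_NOTIF will follow once the kernel is done with the
	 * buffer. On failure only the single completion CQE is posted and
	 * `more` stays clear.
	 */
	if (more && io_uring_wait_cqe(ring, &cqe) == 0)
		io_uring_cqe_seen(ring, cqe);	/* the IORING_CQE_F_NOTIF CQE */

	return res;
}

A caller would set up the ring once with io_uring_queue_init() and must leave `buf` untouched until the notification CQE arrives, since the kernel may still reference those pages for transmission.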
66 lines
2.1 KiB
C
// SPDX-License-Identifier: GPL-2.0

#include <linux/net.h>
#include <linux/uio.h>

#include "alloc_cache.h"

#if defined(CONFIG_NET)
struct io_async_msghdr {
	union {
		struct iovec		fast_iov[UIO_FASTIOV];
		struct {
			struct iovec	fast_iov_one;
			__kernel_size_t	controllen;
			int		namelen;
			__kernel_size_t	payloadlen;
		};
		struct io_cache_entry	cache;
	};
	/* points to an allocated iov, if NULL we use fast_iov instead */
	struct iovec			*free_iov;
	struct sockaddr __user		*uaddr;
	struct msghdr			msg;
	struct sockaddr_storage		addr;
};

struct io_async_connect {
	struct sockaddr_storage		address;
};

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);

int io_sendzc_prep_async(struct io_kiocb *req);
int io_sendmsg_prep_async(struct io_kiocb *req);
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_send(struct io_kiocb *req, unsigned int issue_flags);

int io_recvmsg_prep_async(struct io_kiocb *req);
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_recv(struct io_kiocb *req, unsigned int issue_flags);

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_accept(struct io_kiocb *req, unsigned int issue_flags);

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_socket(struct io_kiocb *req, unsigned int issue_flags);

int io_connect_prep_async(struct io_kiocb *req);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);

int io_sendzc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_sendzc_cleanup(struct io_kiocb *req);

void io_netmsg_cache_free(struct io_cache_entry *entry);
#else
static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
{
}
#endif