
io_uring: add IORING_SETUP_SINGLE_ISSUER

Add a new IORING_SETUP_SINGLE_ISSUER flag and the userspace-visible
part of it, i.e. the restriction to a single submitting task. Also,
don't allow it together with IOPOLL, as we're not going to put it to
good use there.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/4bcc41ee467fdf04c8aab8baf6ce3ba21858c3d4.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov authored on 2022-06-16 10:22:08 +01:00, committed by Jens Axboe
parent 0ec6dca223
commit 97bbdc06a4
5 changed files with 36 additions and 8 deletions
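
As a usage sketch (not part of this commit), a ring could be created with
the new flag through the raw io_uring_setup syscall roughly as below; the
fallback define and the EINVAL note are assumptions for headers and
kernels that predate the flag.

/* hedged sketch: create a SINGLE_ISSUER ring via the raw syscall */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#ifndef IORING_SETUP_SINGLE_ISSUER
#define IORING_SETUP_SINGLE_ISSUER	(1U << 12)	/* value added by this patch */
#endif

int main(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SINGLE_ISSUER;

	fd = syscall(__NR_io_uring_setup, 8, &p);
	if (fd < 0) {
		perror("io_uring_setup");	/* kernels without the flag fail with EINVAL */
		return 1;
	}
	close(fd);
	return 0;
}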


@@ -140,9 +140,12 @@ enum {
  * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
  */
 #define IORING_SETUP_TASKRUN_FLAG	(1U << 9)
-
 #define IORING_SETUP_SQE128		(1U << 10) /* SQEs are 128 byte */
 #define IORING_SETUP_CQE32		(1U << 11) /* CQEs are 32 byte */
+/*
+ * Only one task is allowed to submit requests
+ */
+#define IORING_SETUP_SINGLE_ISSUER	(1U << 12)
 
 enum io_uring_op {
 	IORING_OP_NOP,


@@ -2457,6 +2457,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
 		put_cred(ctx->sq_creds);
+	if (ctx->submitter_task)
+		put_task_struct(ctx->submitter_task);
 
 	/* there are no registered resources left, nobody uses it */
 	if (ctx->rsrc_node)
@@ -3189,7 +3191,7 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
 	if (fd < 0)
 		return fd;
 
-	ret = io_uring_add_tctx_node(ctx);
+	ret = __io_uring_add_tctx_node(ctx, false);
 	if (ret) {
 		put_unused_fd(fd);
 		return ret;
@@ -3409,7 +3411,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
 			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
 			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
 			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
-			IORING_SETUP_SQE128 | IORING_SETUP_CQE32))
+			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
+			IORING_SETUP_SINGLE_ISSUER))
 		return -EINVAL;
 
 	return io_uring_create(entries, &p, params);


@@ -243,6 +243,7 @@ struct io_ring_ctx {
 		/* Keep this last, we don't need it for the fast path */
 		struct io_restriction		restrictions;
+		struct task_struct		*submitter_task;
 
 		/* slow path rsrc auxilary data, used by update/register */
 		struct io_rsrc_node		*rsrc_backup_node;


@@ -94,12 +94,32 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
 	return 0;
 }
 
-int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
+static int io_register_submitter(struct io_ring_ctx *ctx)
+{
+	int ret = 0;
+
+	mutex_lock(&ctx->uring_lock);
+	if (!ctx->submitter_task)
+		ctx->submitter_task = get_task_struct(current);
+	else if (ctx->submitter_task != current)
+		ret = -EEXIST;
+	mutex_unlock(&ctx->uring_lock);
+
+	return ret;
+}
+
+int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter)
 {
 	struct io_uring_task *tctx = current->io_uring;
 	struct io_tctx_node *node;
 	int ret;
 
+	if ((ctx->flags & IORING_SETUP_SINGLE_ISSUER) && submitter) {
+		ret = io_register_submitter(ctx);
+		if (ret)
+			return ret;
+	}
+
 	if (unlikely(!tctx)) {
 		ret = io_uring_alloc_task_context(current, ctx);
 		if (unlikely(ret))
@@ -133,7 +153,8 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 		list_add(&node->ctx_node, &ctx->tctx_list);
 		mutex_unlock(&ctx->uring_lock);
 	}
-	tctx->last = ctx;
+	if (submitter)
+		tctx->last = ctx;
 	return 0;
 }
@@ -241,7 +262,7 @@ int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
 		return -EINVAL;
 
 	mutex_unlock(&ctx->uring_lock);
-	ret = io_uring_add_tctx_node(ctx);
+	ret = __io_uring_add_tctx_node(ctx, false);
 	mutex_lock(&ctx->uring_lock);
 	if (ret)
 		return ret;


@@ -34,7 +34,7 @@ struct io_tctx_node {
 int io_uring_alloc_task_context(struct task_struct *task,
 				struct io_ring_ctx *ctx);
 void io_uring_del_tctx_node(unsigned long index);
-int __io_uring_add_tctx_node(struct io_ring_ctx *ctx);
+int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter);
 void io_uring_clean_tctx(struct io_uring_task *tctx);
 void io_uring_unreg_ringfd(void);
@@ -52,5 +52,5 @@ static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 	if (likely(tctx && tctx->last == ctx))
 		return 0;
-	return __io_uring_add_tctx_node(ctx);
+	return __io_uring_add_tctx_node(ctx, true);
 }
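
For the semantics the tctx changes enforce, a hedged illustration (not
from the patch, assuming liburing and a kernel with this change): the
first task to submit is registered as the ring's submitter by
io_register_submitter(), and a later submit from any other task is
expected to fail with -EEXIST. The thread function name is illustrative.

/* hedged sketch: a second submitting thread gets -EEXIST */
#include <liburing.h>
#include <pthread.h>
#include <stdio.h>

static struct io_uring ring;

static void *second_submitter(void *arg)
{
	int ret;

	(void)arg;
	io_uring_prep_nop(io_uring_get_sqe(&ring));
	ret = io_uring_submit(&ring);
	printf("second thread submit: %d (expect -EEXIST)\n", ret);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* kernels without this patch reject the unknown flag with EINVAL */
	if (io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER) < 0)
		return 1;

	io_uring_prep_nop(io_uring_get_sqe(&ring));
	io_uring_submit(&ring);	/* this task becomes the registered issuer */

	pthread_create(&t, NULL, second_submitter, NULL);
	pthread_join(&t, NULL);
	io_uring_queue_exit(&ring);
	return 0;
}

Build with -luring -lpthread. Note that registration happens on first
submission, not at ring creation: io_uring_install_fd() above passes
submitter == false precisely so that merely creating or receiving the
ring fd does not claim the submitter slot.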