io_uring/kbuf: get rid of lower BGID lists
commit 09ab7eff38 upstream.
Just rely on the xarray for any kind of bgid. This simplifies things, and
the separate lower-BGID array really doesn't buy us much, if anything.
Cc: stable@vger.kernel.org # v6.4+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 781477d729
commit d6e03f6d8b
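
For readers skimming the diff below: the change drops the flat array that used to back buffer group IDs under 64 and routes every bgid through the per-ring xarray. What follows is a minimal, self-contained sketch of that xarray-keyed lookup scheme, not code from this commit; the demo_* names and stripped-down structures are hypothetical stand-ins for io_ring_ctx and io_buffer_list.

/*
 * Sketch: one xarray maps every bgid to its buffer list, with no
 * special case for "low" group IDs. Simplified stand-in types only.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_buffer_list {
	unsigned int bgid;
};

struct demo_ctx {
	struct xarray bl_xa;	/* bgid -> struct demo_buffer_list * */
};

static int demo_add_list(struct demo_ctx *ctx, unsigned int bgid)
{
	struct demo_buffer_list *bl;
	int ret;

	bl = kzalloc(sizeof(*bl), GFP_KERNEL);
	if (!bl)
		return -ENOMEM;
	bl->bgid = bgid;
	/* xa_store() hands back an xa_err()-encoded entry on failure */
	ret = xa_err(xa_store(&ctx->bl_xa, bgid, bl, GFP_KERNEL));
	if (ret)
		kfree(bl);
	return ret;
}

static struct demo_buffer_list *demo_get_list(struct demo_ctx *ctx,
					      unsigned int bgid)
{
	/* single lookup path for every bgid */
	return xa_load(&ctx->bl_xa, bgid);
}

static void demo_remove_list(struct demo_ctx *ctx, unsigned int bgid)
{
	/*
	 * xa_erase() returns the removed entry (or NULL). The real io_uring
	 * code frees with kfree_rcu() because lookups may run locklessly.
	 */
	kfree(xa_erase(&ctx->bl_xa, bgid));
}

static struct demo_ctx demo;

static int __init demo_init(void)
{
	int ret;

	xa_init(&demo.bl_xa);
	ret = demo_add_list(&demo, 2);		/* formerly array-backed range */
	if (!ret)
		ret = demo_add_list(&demo, 4096);	/* formerly xarray-only range */
	if (ret) {
		demo_remove_list(&demo, 2);
		xa_destroy(&demo.bl_xa);
		return ret;
	}
	pr_info("bgid 2: %p, bgid 4096: %p\n",
		demo_get_list(&demo, 2), demo_get_list(&demo, 4096));
	return 0;
}

static void __exit demo_exit(void)
{
	demo_remove_list(&demo, 2);
	demo_remove_list(&demo, 4096);
	xa_destroy(&demo.bl_xa);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xarray-keyed buffer group lookup sketch");

The same pattern appears in the hunks below: xa_store() in io_buffer_add_list(), xa_load() in __io_buffer_get_list(), and xa_erase() plus kfree_rcu() on unregister, with all BGID_ARRAY special cases deleted.
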
include/linux/io_uring_types.h
@@ -250,7 +250,6 @@ struct io_ring_ctx {
 
 		struct io_submit_state		submit_state;
 
-		struct io_buffer_list		*io_bl;
 		struct xarray			io_bl_xa;
 
 		struct io_hash_table		cancel_table_locked;
io_uring/io_uring.c
@@ -343,7 +343,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 err:
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
-	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
 	return NULL;
@@ -2934,7 +2933,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	io_wq_put_hash(ctx->hash_map);
 	kfree(ctx->cancel_table.hbs);
 	kfree(ctx->cancel_table_locked.hbs);
-	kfree(ctx->io_bl);
 	xa_destroy(&ctx->io_bl_xa);
 	kfree(ctx);
 }
io_uring/kbuf.c
@@ -17,8 +17,6 @@
 
 #define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
 
-#define BGID_ARRAY	64
-
 /* BIDs are addressed by a 16-bit field in a CQE */
 #define MAX_BIDS_PER_BGID (1 << 16)
 
@@ -31,13 +29,9 @@ struct io_provide_buf {
 	__u16 bid;
 };
 
-static struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
-						   struct io_buffer_list *bl,
-						   unsigned int bgid)
+static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx,
+							   unsigned int bgid)
 {
-	if (bl && bgid < BGID_ARRAY)
-		return &bl[bgid];
-
 	return xa_load(&ctx->io_bl_xa, bgid);
 }
 
@@ -53,7 +47,7 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 {
 	lockdep_assert_held(&ctx->uring_lock);
 
-	return __io_buffer_get_list(ctx, ctx->io_bl, bgid);
+	return __io_buffer_get_list(ctx, bgid);
 }
 
 static int io_buffer_add_list(struct io_ring_ctx *ctx,
@@ -66,10 +60,6 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
 	 */
 	bl->bgid = bgid;
 	smp_store_release(&bl->is_ready, 1);
-
-	if (bgid < BGID_ARRAY)
-		return 0;
-
 	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 }
 
@@ -215,24 +205,6 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 	return ret;
 }
 
-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
-{
-	struct io_buffer_list *bl;
-	int i;
-
-	bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL);
-	if (!bl)
-		return -ENOMEM;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		INIT_LIST_HEAD(&bl[i].buf_list);
-		bl[i].bgid = i;
-	}
-
-	smp_store_release(&ctx->io_bl, bl);
-	return 0;
-}
-
 /*
  * Mark the given mapped range as free for reuse
  */
@@ -305,13 +277,6 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
 {
 	struct io_buffer_list *bl;
 	unsigned long index;
-	int i;
-
-	for (i = 0; i < BGID_ARRAY; i++) {
-		if (!ctx->io_bl)
-			break;
-		__io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
-	}
 
 	xa_for_each(&ctx->io_bl_xa, index, bl) {
 		xa_erase(&ctx->io_bl_xa, bl->bgid);
@@ -485,12 +450,6 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 
 	io_ring_submit_lock(ctx, issue_flags);
 
-	if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
-		ret = io_init_bl_list(ctx);
-		if (ret)
-			goto err;
-	}
-
 	bl = io_buffer_get_list(ctx, p->bgid);
 	if (unlikely(!bl)) {
 		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
@@ -503,14 +462,9 @@ int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret) {
 			/*
 			 * Doesn't need rcu free as it was never visible, but
-			 * let's keep it consistent throughout. Also can't
-			 * be a lower indexed array group, as adding one
-			 * where lookup failed cannot happen.
+			 * let's keep it consistent throughout.
 			 */
-			if (p->bgid >= BGID_ARRAY)
-				kfree_rcu(bl, rcu);
-			else
-				WARN_ON_ONCE(1);
+			kfree_rcu(bl, rcu);
 			goto err;
 		}
 	}
@@ -675,12 +629,6 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 	if (reg.ring_entries >= 65536)
 		return -EINVAL;
 
-	if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
-		int ret = io_init_bl_list(ctx);
-		if (ret)
-			return ret;
-	}
-
 	bl = io_buffer_get_list(ctx, reg.bgid);
 	if (bl) {
 		/* if mapped buffer ring OR classic exists, don't allow */
@@ -730,10 +678,8 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 		return -EINVAL;
 
 	__io_remove_buffers(ctx, bl, -1U);
-	if (bl->bgid >= BGID_ARRAY) {
-		xa_erase(&ctx->io_bl_xa, bl->bgid);
-		kfree_rcu(bl, rcu);
-	}
+	xa_erase(&ctx->io_bl_xa, bl->bgid);
+	kfree_rcu(bl, rcu);
 	return 0;
 }
 
@@ -741,7 +687,7 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
 {
 	struct io_buffer_list *bl;
 
-	bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
+	bl = __io_buffer_get_list(ctx, bgid);
 
 	if (!bl || !bl->is_mmap)
 		return NULL;