Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-11-25 05:04:09 +08:00
io_uring: add abstraction around apoll cache

In preparation for adding limits, and one more user, abstract out the
core bits of the allocation+free cache.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9da7471ed1
commit 9b797a37c4
include/linux/io_uring_types.h
@@ -158,6 +158,10 @@ struct io_ev_fd {
 	struct rcu_head		rcu;
 };
 
+struct io_alloc_cache {
+	struct hlist_head	list;
+};
+
 struct io_ring_ctx {
 	/* const or read-mostly hot data */
 	struct {
@@ -216,7 +220,7 @@ struct io_ring_ctx {
 
 		struct io_hash_table	cancel_table_locked;
 		struct list_head	cq_overflow_list;
-		struct list_head	apoll_cache;
+		struct io_alloc_cache	apoll_cache;
 		struct xarray		personalities;
 		u32			pers_next;
 	} ____cacheline_aligned_in_smp;
io_uring/alloc_cache.h (new file, 41 lines)
@@ -0,0 +1,41 @@
+#ifndef IOU_ALLOC_CACHE_H
+#define IOU_ALLOC_CACHE_H
+
+struct io_cache_entry {
+	struct hlist_node	node;
+};
+
+static inline void io_alloc_cache_put(struct io_alloc_cache *cache,
+				      struct io_cache_entry *entry)
+{
+	hlist_add_head(&entry->node, &cache->list);
+}
+
+static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+{
+	if (!hlist_empty(&cache->list)) {
+		struct hlist_node *node = cache->list.first;
+
+		hlist_del(node);
+		return container_of(node, struct io_cache_entry, node);
+	}
+
+	return NULL;
+}
+
+static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
+{
+	INIT_HLIST_HEAD(&cache->list);
+}
+
+static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
+					void (*free)(struct io_cache_entry *))
+{
+	while (!hlist_empty(&cache->list)) {
+		struct hlist_node *node = cache->list.first;
+
+		hlist_del(node);
+		free(container_of(node, struct io_cache_entry, node));
+	}
+}
+#endif
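
For context, the new header boils down to an intrusive free list: a consumer
embeds a struct io_cache_entry in its own type, recycles freed objects with
io_alloc_cache_put(), and pops them back with io_alloc_cache_get() instead of
hitting the allocator. A minimal sketch of that usage pattern, assuming the
kernel headers are available; "foo" and the foo_* helpers are illustrative
names, not part of the commit:

#include <linux/slab.h>		/* kmalloc/kfree */
#include "alloc_cache.h"

/* Hypothetical consumer type; mirrors what async_poll does below. */
struct foo {
	struct io_cache_entry	cache;	/* embedded linkage, no extra alloc */
	int			data;
};

static struct foo *foo_get(struct io_alloc_cache *c)
{
	struct io_cache_entry *entry = io_alloc_cache_get(c);

	/* Reuse a cached object if one is available... */
	if (entry)
		return container_of(entry, struct foo, cache);
	/* ...otherwise fall back to a fresh allocation. */
	return kmalloc(sizeof(struct foo), GFP_KERNEL);
}

static void foo_put(struct io_alloc_cache *c, struct foo *f)
{
	/* Recycle instead of kfree(). */
	io_alloc_cache_put(c, &f->cache);
}

static void foo_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct foo, cache));
}

/* Teardown drains the cache through the per-type callback:
 *	io_alloc_cache_free(&some_cache, foo_cache_free);
 */

Note that the cache itself carries no locking; the apoll user relies on
ctx->uring_lock, which is why the poll.c hunk below only takes the cached
path when IO_URING_F_UNLOCKED is clear.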
io_uring/io_uring.c
@@ -92,6 +92,7 @@
 
 #include "timeout.h"
 #include "poll.h"
+#include "alloc_cache.h"
 
 #define IORING_MAX_ENTRIES	32768
 #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
@@ -295,7 +296,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->sqd_list);
 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
 	INIT_LIST_HEAD(&ctx->io_buffers_cache);
-	INIT_LIST_HEAD(&ctx->apoll_cache);
+	io_alloc_cache_init(&ctx->apoll_cache);
 	init_completion(&ctx->ref_comp);
 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
 	mutex_init(&ctx->uring_lock);
@@ -1180,8 +1181,7 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 
 			if (apoll->double_poll)
 				kfree(apoll->double_poll);
-			list_add(&apoll->poll.wait.entry,
-					&ctx->apoll_cache);
+			io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache);
 			req->flags &= ~REQ_F_POLLED;
 		}
 		if (req->flags & IO_REQ_LINK_FLAGS)
@@ -2467,7 +2467,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	if (ctx->rings)
 		__io_cqring_overflow_flush(ctx, true);
 	io_eventfd_unregister(ctx);
-	io_flush_apoll_cache(ctx);
+	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
 	mutex_unlock(&ctx->uring_lock);
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
io_uring/poll.c
@@ -590,16 +590,15 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
 					     unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_cache_entry *entry;
 	struct async_poll *apoll;
 
 	if (req->flags & REQ_F_POLLED) {
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
 	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
-		   !list_empty(&ctx->apoll_cache)) {
-		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
-						poll.wait.entry);
-		list_del_init(&apoll->poll.wait.entry);
+		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
+		apoll = container_of(entry, struct async_poll, cache);
 	} else {
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
 		if (unlikely(!apoll))
@@ -960,14 +959,7 @@ out:
 	return IOU_OK;
 }
 
-void io_flush_apoll_cache(struct io_ring_ctx *ctx)
+void io_apoll_cache_free(struct io_cache_entry *entry)
 {
-	struct async_poll *apoll;
-
-	while (!list_empty(&ctx->apoll_cache)) {
-		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
-						poll.wait.entry);
-		list_del(&apoll->poll.wait.entry);
-		kfree(apoll);
-	}
+	kfree(container_of(entry, struct async_poll, cache));
 }
io_uring/poll.h
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#include "alloc_cache.h"
+
 enum {
 	IO_APOLL_OK,
 	IO_APOLL_ABORTED,
@@ -14,7 +16,10 @@ struct io_poll {
 };
 
 struct async_poll {
-	struct io_poll		poll;
+	union {
+		struct io_poll		poll;
+		struct io_cache_entry	cache;
+	};
 	struct io_poll		*double_poll;
 };
 
@@ -31,4 +36,4 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
 bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			bool cancel_all);
 
-void io_flush_apoll_cache(struct io_ring_ctx *ctx);
+void io_apoll_cache_free(struct io_cache_entry *entry);
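
The union in struct async_poll is the space trick that makes caching free:
while an apoll sits in the cache its poll state is dead, so the hlist linkage
can overlay that storage instead of growing the struct. An illustrative
compile-time check of the size property this exploits (not part of the
commit):

#include <linux/build_bug.h>	/* static_assert */

/* The overlay only saves space because the linkage fits inside the dead
 * poll state; if io_cache_entry ever outgrew io_poll, the union (and
 * struct async_poll) would silently grow instead. */
static_assert(sizeof(struct io_cache_entry) <= sizeof(struct io_poll),
	      "cache linkage should fit inside the cached object's dead state");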