mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-25 21:24:08 +08:00
6206f0e180
Currently we're using 40 bytes for the io_wq_work structure, and 16 of those are the doubly linked list node. We don't need doubly linked lists: we always add to the tail to keep things ordered, and any other use case is list traversal with deletion. For the deletion case, we can easily support any node deletion by keeping track of the previous entry. This shrinks io_wq_work to 32 bytes, and subsequently io_kiocb in io_uring from 216 to 208 bytes. Signed-off-by: Jens Axboe <axboe@kernel.dk>
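The trade-off is easiest to see in code. Below is a minimal userspace sketch of the scheme the commit message describes; the "struct work" wrapper and the main() harness are made-up stand-ins, while the node/list types and the two helpers mirror the io-wq.h listing that follows. Because traversal already remembers the previously visited node, any entry can be unlinked from a singly linked list in O(1), so the second pointer of a doubly linked list buys nothing here.

/*
 * Userspace demonstration only, not part of the kernel file below.
 * "struct work" and main() are hypothetical; the node/list types and
 * both helpers mirror the io-wq.h listing.
 */
#include <stdio.h>

struct io_wq_work_node {
        struct io_wq_work_node *next;
};

struct io_wq_work_list {
        struct io_wq_work_node *first;
        struct io_wq_work_node *last;
};

/* hypothetical work item; the node is first so a cast recovers it */
struct work {
        struct io_wq_work_node list;
        int id;
};

static void wq_list_add_tail(struct io_wq_work_node *node,
                             struct io_wq_work_list *list)
{
        node->next = NULL;
        if (!list->first) {
                list->first = list->last = node;
        } else {
                list->last->next = node;
                list->last = node;
        }
}

static void wq_node_del(struct io_wq_work_list *list,
                        struct io_wq_work_node *node,
                        struct io_wq_work_node *prev)
{
        if (node == list->first)
                list->first = node->next;
        if (node == list->last)
                list->last = prev;
        if (prev)
                prev->next = node->next;
}

int main(void)
{
        struct io_wq_work_list list = { NULL, NULL };
        struct work w[3] = { { { NULL }, 0 }, { { NULL }, 1 }, { { NULL }, 2 } };
        struct io_wq_work_node *pos, *prv;
        int i;

        for (i = 0; i < 3; i++)
                wq_list_add_tail(&w[i].list, &list);

        /* delete id 1 mid-walk: tracking prv makes the unlink O(1) */
        for (pos = list.first, prv = NULL; pos; prv = pos, pos = pos->next) {
                if (((struct work *)pos)->id == 1) {
                        wq_node_del(&list, pos, prv);
                        break;
                }
        }

        for (pos = list.first; pos; pos = pos->next)
                printf("id %d\n", ((struct work *)pos)->id);  /* prints 0, then 2 */
        return 0;
}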
128 lines
3.0 KiB
C
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

struct io_wq;

enum {
        IO_WQ_WORK_CANCEL       = 1,
        IO_WQ_WORK_HAS_MM       = 2,
        IO_WQ_WORK_HASHED       = 4,
        IO_WQ_WORK_NEEDS_USER   = 8,
        IO_WQ_WORK_NEEDS_FILES  = 16,
        IO_WQ_WORK_UNBOUND      = 32,
        IO_WQ_WORK_INTERNAL     = 64,
        IO_WQ_WORK_CB           = 128,

        IO_WQ_HASH_SHIFT        = 24,   /* upper 8 bits are used for hash key */
};

enum io_wq_cancel {
        IO_WQ_CANCEL_OK,        /* cancelled before started */
        IO_WQ_CANCEL_RUNNING,   /* found, running, and attempted cancelled */
        IO_WQ_CANCEL_NOTFOUND,  /* work not found */
};

/*
 * Singly linked list node: half the size of a doubly linked list_head,
 * which is all this queue needs, since additions always go to the tail
 * and removal walks the list while tracking the previous node.
 */
struct io_wq_work_node {
        struct io_wq_work_node *next;
};

struct io_wq_work_list {
        struct io_wq_work_node *first;
        struct io_wq_work_node *last;
};

/* O(1) append: new work always goes to the tail, preserving order */
static inline void wq_list_add_tail(struct io_wq_work_node *node,
                                    struct io_wq_work_list *list)
{
        if (!list->first) {
                list->first = list->last = node;
        } else {
                list->last->next = node;
                list->last = node;
        }
}

/*
 * Unlink @node given the node that precedes it in the list (NULL if
 * @node is the head). Callers get @prev for free from wq_list_for_each().
 */
static inline void wq_node_del(struct io_wq_work_list *list,
                               struct io_wq_work_node *node,
                               struct io_wq_work_node *prev)
{
        if (node == list->first)
                list->first = node->next;
        if (node == list->last)
                list->last = prev;
        if (prev)
                prev->next = node->next;
}

#define wq_list_for_each(pos, prv, head)                        \
        for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)     ((list)->first == NULL)
#define INIT_WQ_LIST(list)      do {                            \
        (list)->first = NULL;                                   \
        (list)->last = NULL;                                    \
} while (0)

struct io_wq_work {
        union {
                struct io_wq_work_node list;
                void *data;
        };
        void (*func)(struct io_wq_work **);
        struct files_struct *files;
        unsigned flags;
};

#define INIT_IO_WORK(work, _func)                       \
        do {                                            \
                (work)->list.next = NULL;               \
                (work)->func = _func;                   \
                (work)->flags = 0;                      \
                (work)->files = NULL;                   \
        } while (0)

typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);

struct io_wq_data {
        struct mm_struct *mm;
        struct user_struct *user;
        struct cred *creds;

        get_work_fn *get_work;
        put_work_fn *put_work;
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
void io_wq_flush(struct io_wq *wq);

void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
                                        void *data);

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

static inline bool io_wq_current_is_worker(void)
{
        return in_task() && (current->flags & PF_IO_WORKER);
}
#endif