mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-29 15:43:59 +08:00
io_uring: flush notifiers after sendzc
Allow flushing notifiers as part of a sendzc request by setting the IORING_SENDZC_FLUSH flag. When the sendzc request succeeds it will flush the used [active] notifier. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/e0b4d9a6797e2fd6092824fe42953db7a519bbc8.1657643355.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
10c7d33ecd
commit
63809137eb
@ -275,10 +275,14 @@ enum io_uring_op {
|
||||
*
|
||||
* IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in
|
||||
* the buf_index field.
|
||||
*
|
||||
* IORING_RECVSEND_NOTIF_FLUSH Flush a notification after a successful
|
||||
* send. Only for zerocopy sends.
|
||||
*/
|
||||
#define IORING_RECVSEND_POLL_FIRST (1U << 0)
|
||||
#define IORING_RECV_MULTISHOT (1U << 1)
|
||||
#define IORING_RECVSEND_FIXED_BUF (1U << 2)
|
||||
#define IORING_RECVSEND_NOTIF_FLUSH (1U << 3)
|
||||
|
||||
/*
|
||||
* accept flags stored in sqe->ioprio
|
||||
|
@ -621,7 +621,7 @@ void __io_put_task(struct task_struct *task, int nr)
|
||||
put_task_struct_many(task, nr);
|
||||
}
|
||||
|
||||
static void io_task_refs_refill(struct io_uring_task *tctx)
|
||||
void io_task_refs_refill(struct io_uring_task *tctx)
|
||||
{
|
||||
unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
|
||||
|
||||
@ -630,15 +630,6 @@ static void io_task_refs_refill(struct io_uring_task *tctx)
|
||||
tctx->cached_refs += refill;
|
||||
}
|
||||
|
||||
static inline void io_get_task_refs(int nr)
|
||||
{
|
||||
struct io_uring_task *tctx = current->io_uring;
|
||||
|
||||
tctx->cached_refs -= nr;
|
||||
if (unlikely(tctx->cached_refs < 0))
|
||||
io_task_refs_refill(tctx);
|
||||
}
|
||||
|
||||
static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
|
||||
{
|
||||
struct io_uring_task *tctx = task->io_uring;
|
||||
|
@ -74,6 +74,7 @@ void io_wq_submit_work(struct io_wq_work *work);
|
||||
void io_free_req(struct io_kiocb *req);
|
||||
void io_queue_next(struct io_kiocb *req);
|
||||
void __io_put_task(struct task_struct *task, int nr);
|
||||
void io_task_refs_refill(struct io_uring_task *tctx);
|
||||
|
||||
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
|
||||
bool cancel_all);
|
||||
@ -270,4 +271,13 @@ static inline void io_put_task(struct task_struct *task, int nr)
|
||||
__io_put_task(task, nr);
|
||||
}
|
||||
|
||||
static inline void io_get_task_refs(int nr)
|
||||
{
|
||||
struct io_uring_task *tctx = current->io_uring;
|
||||
|
||||
tctx->cached_refs -= nr;
|
||||
if (unlikely(tctx->cached_refs < 0))
|
||||
io_task_refs_refill(tctx);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -856,7 +856,8 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
|
||||
return -EINVAL;
|
||||
|
||||
zc->flags = READ_ONCE(sqe->ioprio);
|
||||
if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF))
|
||||
if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
|
||||
IORING_RECVSEND_FIXED_BUF | IORING_RECVSEND_NOTIF_FLUSH))
|
||||
return -EINVAL;
|
||||
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
|
||||
unsigned idx = READ_ONCE(sqe->buf_index);
|
||||
@ -958,6 +959,8 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
|
||||
return ret == -ERESTARTSYS ? -EINTR : ret;
|
||||
}
|
||||
|
||||
if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH)
|
||||
io_notif_slot_flush_submit(notif_slot, 0);
|
||||
io_req_set_res(req, ret, 0);
|
||||
return IOU_OK;
|
||||
}
|
||||
|
@ -133,7 +133,7 @@ struct io_notif *io_alloc_notif(struct io_ring_ctx *ctx,
|
||||
return notif;
|
||||
}
|
||||
|
||||
static void io_notif_slot_flush(struct io_notif_slot *slot)
|
||||
void io_notif_slot_flush(struct io_notif_slot *slot)
|
||||
__must_hold(&ctx->uring_lock)
|
||||
{
|
||||
struct io_notif *notif = slot->notif;
|
||||
|
@ -54,6 +54,7 @@ int io_notif_register(struct io_ring_ctx *ctx,
|
||||
int io_notif_unregister(struct io_ring_ctx *ctx);
|
||||
void io_notif_cache_purge(struct io_ring_ctx *ctx);
|
||||
|
||||
void io_notif_slot_flush(struct io_notif_slot *slot);
|
||||
struct io_notif *io_alloc_notif(struct io_ring_ctx *ctx,
|
||||
struct io_notif_slot *slot);
|
||||
|
||||
@ -74,3 +75,13 @@ static inline struct io_notif_slot *io_get_notif_slot(struct io_ring_ctx *ctx,
|
||||
idx = array_index_nospec(idx, ctx->nr_notif_slots);
|
||||
return &ctx->notif_slots[idx];
|
||||
}
|
||||
|
||||
static inline void io_notif_slot_flush_submit(struct io_notif_slot *slot,
|
||||
unsigned int issue_flags)
|
||||
{
|
||||
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
|
||||
slot->notif->task = current;
|
||||
io_get_task_refs(1);
|
||||
}
|
||||
io_notif_slot_flush(slot);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user