8f350194d5
This adds support for IORING_OP_FUTEX_WAITV, which allows registering a notification for a number of futexes at once. If one of the futexes is woken, then the request will complete with the index of the futex that got woken as the result. This is identical to what the normal vectored futex waitv operation does.

Use like IORING_OP_FUTEX_WAIT, except sqe->addr must now contain a pointer to a struct futex_waitv array, and sqe->off must now contain the number of elements in that array. As the flags, values, and futex addresses are all passed in the futex_waitv array, sqe->addr2 and sqe->addr3 are also reserved for IORING_OP_FUTEX_WAITV.

For cancelations, FUTEX_WAITV does not rely on the futex_unqueue() return value, as we're dealing with multiple futexes. Instead, a separate per io_uring request atomic is used to claim ownership of the request.

Waiting on N futexes could be done with IORING_OP_FUTEX_WAIT as well, but that punts a lot of the work to the application:

1) The application would need to submit N IORING_OP_FUTEX_WAIT requests, rather than just a single IORING_OP_FUTEX_WAITV.

2) When one futex is woken, the application would need to cancel the remaining N-1 requests that didn't trigger.

While this is of course doable, having a single vectored futex wait makes for much simpler application code.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
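As a concrete illustration of the sqe layout described above, here is a hedged userspace sketch using liburing's io_uring_prep_futex_waitv() helper (assumed to be available in newer liburing releases; if yours lacks it, the raw fields are exactly as the commit message says: opcode IORING_OP_FUTEX_WAITV, sqe->addr pointing at the futex_waitv array, sqe->off holding the element count). The futexes array and the expected values are illustrative, not from this commit:

/*
 * Hedged sketch: wait on two futexes at once via IORING_OP_FUTEX_WAITV.
 * Assumes a liburing with io_uring_prep_futex_waitv() and a kernel
 * containing this commit. "futexes" and the expected values below are
 * made up for illustration.
 */
#include <liburing.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t futexes[2];

int main(void)
{
	struct futex_waitv waitv[2] = {};
	struct io_uring_cqe *cqe;
	struct io_uring_sqe *sqe;
	struct io_uring ring;
	int i;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	for (i = 0; i < 2; i++) {
		waitv[i].uaddr = (uintptr_t)&futexes[i];
		waitv[i].val = 0;			/* expected value */
		waitv[i].flags = FUTEX2_SIZE_U32;	/* 32-bit futexes */
	}

	sqe = io_uring_get_sqe(&ring);
	/* Internally: sqe->addr = waitv array, sqe->off = element count */
	io_uring_prep_futex_waitv(sqe, waitv, 2, 0);
	io_uring_submit(&ring);

	/* Some other thread wakes one of the futexes; cqe->res is its index. */
	if (!io_uring_wait_cqe(&ring, &cqe) && cqe->res >= 0)
		printf("futex index %d was woken\n", cqe->res);

	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}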
37 lines · 1.1 KiB · C
// SPDX-License-Identifier: GPL-2.0

#include "cancel.h"

int io_futex_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_futexv_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags);
int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags);
int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags);

#if defined(CONFIG_FUTEX)
int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		    unsigned int issue_flags);
bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
			 bool cancel_all);
void io_futex_cache_init(struct io_ring_ctx *ctx);
void io_futex_cache_free(struct io_ring_ctx *ctx);
#else
static inline int io_futex_cancel(struct io_ring_ctx *ctx,
				  struct io_cancel_data *cd,
				  unsigned int issue_flags)
{
	return 0;
}
static inline bool io_futex_remove_all(struct io_ring_ctx *ctx,
				       struct task_struct *task, bool cancel_all)
{
	return false;
}
static inline void io_futex_cache_init(struct io_ring_ctx *ctx)
{
}
static inline void io_futex_cache_free(struct io_ring_ctx *ctx)
{
}
#endif
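When CONFIG_FUTEX is disabled, the stubs above turn io_futex_cancel() and io_futex_remove_all() into no-ops. For the enabled path, the commit message notes that FUTEX_WAITV cancelation does not rely on the futex_unqueue() return value but instead claims ownership of the request through a per-request atomic. A minimal sketch of that claim pattern follows, using C11 atomics and hypothetical names (waitv_req, waitv_claim) rather than the kernel's actual fields:

/*
 * Sketch only: illustrates the "claim ownership via a per-request
 * atomic" idea from the commit message. The names here are
 * hypothetical, not the kernel's.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct waitv_req {
	atomic_bool owned;	/* set by whichever path completes the request */
};

/*
 * Both the wake path and the cancel path call this; exactly one caller
 * sees the flag transition from false to true and may complete the
 * request. The loser backs off without touching it further.
 */
static bool waitv_claim(struct waitv_req *req)
{
	return !atomic_exchange(&req->owned, true);
}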