
io_uring: clean up tracing events

We have lots of trace events accepting an io_uring request and wanting
to print some of its fields like user_data, opcode, flags and so on.
However, as trace points were unaware of io_uring structures, we had to
pass all the fields as arguments. Teach trace/events/io_uring.h about
struct io_kiocb and stop the misery of passing a horde of arguments to
trace helpers.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/40ff72f92798114e56d400f2b003beb6cde6ef53.1655384063.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Authored by Pavel Begunkov on 2022-06-16 13:57:20 +01:00, committed by Jens Axboe
parent ab1c84d855
commit 48863ffd3e
4 changed files with 66 additions and 100 deletions
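
In practical terms, each call site shrinks from spelling out the ring context, user_data, opcode, flags and so on to passing just the request, plus whatever the event cannot derive from it. A minimal before/after sketch of one call site, taken from the io_uring.c hunk further down, illustrates the pattern; the tracepoint now pulls the remaining fields out of struct io_kiocb inside TP_fast_assign:

        /* Before: the caller spells out every field the event records. */
        trace_io_uring_queue_async_work(req->ctx, req, req->cqe.user_data,
                                        req->opcode, req->flags, &req->work,
                                        io_wq_is_hashed(&req->work));

        /* After: the caller passes the request; TP_fast_assign reads
         * req->ctx, req->cqe.user_data, req->opcode, req->flags and
         * &req->work itself. Only the hashed/normal queue flag still
         * has to be passed explicitly.
         */
        trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));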

include/trace/events/io_uring.h

@@ -7,6 +7,7 @@
 #include <linux/tracepoint.h>
 #include <uapi/linux/io_uring.h>
+#include <linux/io_uring_types.h>
 #include <linux/io_uring.h>
 struct io_wq_work;
@@ -97,9 +98,7 @@ TRACE_EVENT(io_uring_register,
 /**
  * io_uring_file_get - called before getting references to an SQE file
  *
- * @ctx: pointer to a ring context structure
  * @req: pointer to a submitted request
- * @user_data: user data associated with the request
  * @fd: SQE file descriptor
  *
  * Allows to trace out how often an SQE file reference is obtained, which can
@@ -108,9 +107,9 @@ TRACE_EVENT(io_uring_register,
  */
 TRACE_EVENT(io_uring_file_get,
-        TP_PROTO(void *ctx, void *req, unsigned long long user_data, int fd),
+        TP_PROTO(struct io_kiocb *req, int fd),
-        TP_ARGS(ctx, req, user_data, fd),
+        TP_ARGS(req, fd),
         TP_STRUCT__entry (
                 __field( void *, ctx )
@@ -120,9 +119,9 @@ TRACE_EVENT(io_uring_file_get,
         ),
         TP_fast_assign(
-                __entry->ctx = ctx;
+                __entry->ctx = req->ctx;
                 __entry->req = req;
-                __entry->user_data = user_data;
+                __entry->user_data = req->cqe.user_data;
                 __entry->fd = fd;
         ),
@@ -133,22 +132,16 @@ TRACE_EVENT(io_uring_file_get,
 /**
  * io_uring_queue_async_work - called before submitting a new async work
  *
- * @ctx: pointer to a ring context structure
  * @req: pointer to a submitted request
- * @user_data: user data associated with the request
- * @opcode: opcode of request
- * @flags request flags
- * @work: pointer to a submitted io_wq_work
  * @rw: type of workqueue, hashed or normal
  *
  * Allows to trace asynchronous work submission.
  */
 TRACE_EVENT(io_uring_queue_async_work,
-        TP_PROTO(void *ctx, void * req, unsigned long long user_data, u8 opcode,
-                 unsigned int flags, struct io_wq_work *work, int rw),
+        TP_PROTO(struct io_kiocb *req, int rw),
-        TP_ARGS(ctx, req, user_data, opcode, flags, work, rw),
+        TP_ARGS(req, rw),
         TP_STRUCT__entry (
                 __field( void *, ctx )
@@ -159,19 +152,19 @@ TRACE_EVENT(io_uring_queue_async_work,
                 __field( struct io_wq_work *, work )
                 __field( int, rw )
-                __string( op_str, io_uring_get_opcode(opcode) )
+                __string( op_str, io_uring_get_opcode(req->opcode) )
         ),
         TP_fast_assign(
-                __entry->ctx = ctx;
+                __entry->ctx = req->ctx;
                 __entry->req = req;
-                __entry->user_data = user_data;
+                __entry->user_data = req->cqe.user_data;
-                __entry->flags = flags;
+                __entry->flags = req->flags;
-                __entry->opcode = opcode;
+                __entry->opcode = req->opcode;
-                __entry->work = work;
+                __entry->work = &req->work;
                 __entry->rw = rw;
-                __assign_str(op_str, io_uring_get_opcode(opcode));
+                __assign_str(op_str, io_uring_get_opcode(req->opcode));
         ),
         TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p",
@@ -183,19 +176,16 @@ TRACE_EVENT(io_uring_queue_async_work,
 /**
  * io_uring_defer - called when an io_uring request is deferred
  *
- * @ctx: pointer to a ring context structure
  * @req: pointer to a deferred request
- * @user_data: user data associated with the request
- * @opcode: opcode of request
  *
  * Allows to track deferred requests, to get an insight about what requests are
  * not started immediately.
  */
 TRACE_EVENT(io_uring_defer,
-        TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode),
+        TP_PROTO(struct io_kiocb *req),
-        TP_ARGS(ctx, req, user_data, opcode),
+        TP_ARGS(req),
         TP_STRUCT__entry (
                 __field( void *, ctx )
@@ -203,16 +193,16 @@ TRACE_EVENT(io_uring_defer,
                 __field( unsigned long long, data )
                 __field( u8, opcode )
-                __string( op_str, io_uring_get_opcode(opcode) )
+                __string( op_str, io_uring_get_opcode(req->opcode) )
         ),
         TP_fast_assign(
-                __entry->ctx = ctx;
+                __entry->ctx = req->ctx;
                 __entry->req = req;
-                __entry->data = user_data;
+                __entry->data = req->cqe.user_data;
-                __entry->opcode = opcode;
+                __entry->opcode = req->opcode;
-                __assign_str(op_str, io_uring_get_opcode(opcode));
+                __assign_str(op_str, io_uring_get_opcode(req->opcode));
         ),
         TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s",
@@ -224,7 +214,6 @@ TRACE_EVENT(io_uring_defer,
  * io_uring_link - called before the io_uring request added into link_list of
  *                 another request
  *
- * @ctx: pointer to a ring context structure
  * @req: pointer to a linked request
  * @target_req: pointer to a previous request, that would contain @req
  *
@@ -233,9 +222,9 @@ TRACE_EVENT(io_uring_defer,
  */
 TRACE_EVENT(io_uring_link,
-        TP_PROTO(void *ctx, void *req, void *target_req),
+        TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req),
-        TP_ARGS(ctx, req, target_req),
+        TP_ARGS(req, target_req),
         TP_STRUCT__entry (
                 __field( void *, ctx )
@@ -244,7 +233,7 @@ TRACE_EVENT(io_uring_link,
         ),
         TP_fast_assign(
-                __entry->ctx = ctx;
+                __entry->ctx = req->ctx;
                 __entry->req = req;
                 __entry->target_req = target_req;
         ),
@@ -285,10 +274,7 @@ TRACE_EVENT(io_uring_cqring_wait,
 /**
  * io_uring_fail_link - called before failing a linked request
  *
- * @ctx: pointer to a ring context structure
  * @req: request, which links were cancelled
- * @user_data: user data associated with the request
- * @opcode: opcode of request
  * @link: cancelled link
  *
  * Allows to track linked requests cancellation, to see not only that some work
@@ -296,9 +282,9 @@ TRACE_EVENT(io_uring_cqring_wait,
  */
 TRACE_EVENT(io_uring_fail_link,
-        TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, void *link),
+        TP_PROTO(struct io_kiocb *req, struct io_kiocb *link),
-        TP_ARGS(ctx, req, user_data, opcode, link),
+        TP_ARGS(req, link),
         TP_STRUCT__entry (
                 __field( void *, ctx )
@@ -307,17 +293,17 @@ TRACE_EVENT(io_uring_fail_link,
                 __field( u8, opcode )
                 __field( void *, link )
-                __string( op_str, io_uring_get_opcode(opcode) )
+                __string( op_str, io_uring_get_opcode(req->opcode) )
         ),
         TP_fast_assign(
-                __entry->ctx = ctx;
+                __entry->ctx = req->ctx;
                 __entry->req = req;
-                __entry->user_data = user_data;
+                __entry->user_data = req->cqe.user_data;
-                __entry->opcode = opcode;
+                __entry->opcode = req->opcode;
                 __entry->link = link;
-                __assign_str(op_str, io_uring_get_opcode(opcode));
+                __assign_str(op_str, io_uring_get_opcode(req->opcode));
         ),
         TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p",
@@ -376,23 +362,17 @@ TRACE_EVENT(io_uring_complete,
 /**
  * io_uring_submit_sqe - called before submitting one SQE
  *
- * @ctx: pointer to a ring context structure
  * @req: pointer to a submitted request
- * @user_data: user data associated with the request
- * @opcode: opcode of request
- * @flags request flags
  * @force_nonblock: whether a context blocking or not
- * @sq_thread: true if sq_thread has submitted this SQE
  *
  * Allows to track SQE submitting, to understand what was the source of it, SQ
  * thread or io_uring_enter call.
  */
 TRACE_EVENT(io_uring_submit_sqe,
-        TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, u32 flags,
-                 bool force_nonblock, bool sq_thread),
+        TP_PROTO(struct io_kiocb *req, bool force_nonblock),
-        TP_ARGS(ctx, req, user_data, opcode, flags, force_nonblock, sq_thread),
+        TP_ARGS(req, force_nonblock),
         TP_STRUCT__entry (
                 __field( void *, ctx )
@@ -403,19 +383,19 @@ TRACE_EVENT(io_uring_submit_sqe,
                 __field( bool, force_nonblock )
                 __field( bool, sq_thread )
-                __string( op_str, io_uring_get_opcode(opcode) )
+                __string( op_str, io_uring_get_opcode(req->opcode) )
         ),
         TP_fast_assign(
-                __entry->ctx = ctx;
+                __entry->ctx = req->ctx;
                 __entry->req = req;
-                __entry->user_data = user_data;
+                __entry->user_data = req->cqe.user_data;
-                __entry->opcode = opcode;
+                __entry->opcode = req->opcode;
-                __entry->flags = flags;
+                __entry->flags = req->flags;
                 __entry->force_nonblock = force_nonblock;
-                __entry->sq_thread = sq_thread;
+                __entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL;
-                __assign_str(op_str, io_uring_get_opcode(opcode));
+                __assign_str(op_str, io_uring_get_opcode(req->opcode));
         ),
         TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, "
@@ -427,10 +407,7 @@ TRACE_EVENT(io_uring_submit_sqe,
 /*
  * io_uring_poll_arm - called after arming a poll wait if successful
  *
- * @ctx: pointer to a ring context structure
  * @req: pointer to the armed request
- * @user_data: user data associated with the request
- * @opcode: opcode of request
  * @mask: request poll events mask
  * @events: registered events of interest
  *
@@ -439,10 +416,9 @@ TRACE_EVENT(io_uring_submit_sqe,
  */
 TRACE_EVENT(io_uring_poll_arm,
-        TP_PROTO(void *ctx, void *req, u64 user_data, u8 opcode,
-                 int mask, int events),
+        TP_PROTO(struct io_kiocb *req, int mask, int events),
-        TP_ARGS(ctx, req, user_data, opcode, mask, events),
+        TP_ARGS(req, mask, events),
         TP_STRUCT__entry (
                 __field( void *, ctx )
@@ -452,18 +428,18 @@ TRACE_EVENT(io_uring_poll_arm,
                 __field( int, mask )
                 __field( int, events )
-                __string( op_str, io_uring_get_opcode(opcode) )
+                __string( op_str, io_uring_get_opcode(req->opcode) )
         ),
         TP_fast_assign(
-                __entry->ctx = ctx;
+                __entry->ctx = req->ctx;
                 __entry->req = req;
-                __entry->user_data = user_data;
+                __entry->user_data = req->cqe.user_data;
-                __entry->opcode = opcode;
+                __entry->opcode = req->opcode;
                 __entry->mask = mask;
                 __entry->events = events;
-                __assign_str(op_str, io_uring_get_opcode(opcode));
+                __assign_str(op_str, io_uring_get_opcode(req->opcode));
         ),
         TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x",
@@ -475,18 +451,15 @@ TRACE_EVENT(io_uring_poll_arm,
 /*
  * io_uring_task_add - called after adding a task
  *
- * @ctx: pointer to a ring context structure
  * @req: pointer to request
- * @user_data: user data associated with the request
- * @opcode: opcode of request
  * @mask: request poll events mask
  *
  */
 TRACE_EVENT(io_uring_task_add,
-        TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, int mask),
+        TP_PROTO(struct io_kiocb *req, int mask),
-        TP_ARGS(ctx, req, user_data, opcode, mask),
+        TP_ARGS(req, mask),
         TP_STRUCT__entry (
                 __field( void *, ctx )
@@ -495,17 +468,17 @@ TRACE_EVENT(io_uring_task_add,
                 __field( u8, opcode )
                 __field( int, mask )
-                __string( op_str, io_uring_get_opcode(opcode) )
+                __string( op_str, io_uring_get_opcode(req->opcode) )
         ),
         TP_fast_assign(
-                __entry->ctx = ctx;
+                __entry->ctx = req->ctx;
                 __entry->req = req;
-                __entry->user_data = user_data;
+                __entry->user_data = req->cqe.user_data;
-                __entry->opcode = opcode;
+                __entry->opcode = req->opcode;
                 __entry->mask = mask;
-                __assign_str(op_str, io_uring_get_opcode(opcode));
+                __assign_str(op_str, io_uring_get_opcode(req->opcode));
         ),
         TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x",
@@ -518,7 +491,6 @@ TRACE_EVENT(io_uring_task_add,
  * io_uring_req_failed - called when an sqe is errored dring submission
  *
  * @sqe: pointer to the io_uring_sqe that failed
- * @ctx: pointer to a ring context structure
  * @req: pointer to request
  * @error: error it failed with
  *
@@ -526,9 +498,9 @@ TRACE_EVENT(io_uring_task_add,
  */
 TRACE_EVENT(io_uring_req_failed,
-        TP_PROTO(const struct io_uring_sqe *sqe, void *ctx, void *req, int error),
+        TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error),
-        TP_ARGS(sqe, ctx, req, error),
+        TP_ARGS(sqe, req, error),
         TP_STRUCT__entry (
                 __field( void *, ctx )
@@ -552,7 +524,7 @@ TRACE_EVENT(io_uring_req_failed,
         ),
         TP_fast_assign(
-                __entry->ctx = ctx;
+                __entry->ctx = req->ctx;
                 __entry->req = req;
                 __entry->user_data = sqe->user_data;
                 __entry->opcode = sqe->opcode;

io_uring/io_uring.c

@@ -452,9 +452,7 @@ void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
         if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
                 req->work.flags |= IO_WQ_WORK_CANCEL;
-        trace_io_uring_queue_async_work(req->ctx, req, req->cqe.user_data,
-                                        req->opcode, req->flags, &req->work,
-                                        io_wq_is_hashed(&req->work));
+        trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
         io_wq_enqueue(tctx->io_wq, &req->work);
         if (link)
                 io_queue_linked_timeout(link);
@@ -1583,7 +1581,7 @@ fail:
                 goto queue;
         }
-        trace_io_uring_defer(ctx, req, req->cqe.user_data, req->opcode);
+        trace_io_uring_defer(req);
         de->req = req;
         de->seq = seq;
         list_add_tail(&de->list, &ctx->defer_list);
@@ -1783,7 +1781,7 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 {
         struct file *file = fget(fd);
-        trace_io_uring_file_get(req->ctx, req, req->cqe.user_data, fd);
+        trace_io_uring_file_get(req, fd);
         /* we don't allow fixed io_uring files */
         if (file && io_is_uring_fops(file))
@@ -2006,7 +2004,7 @@ static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
         struct io_submit_link *link = &ctx->submit_state.link;
         struct io_kiocb *head = link->head;
-        trace_io_uring_req_failed(sqe, ctx, req, ret);
+        trace_io_uring_req_failed(sqe, req, ret);
         /*
          * Avoid breaking links in the middle as it renders links with SQPOLL
@@ -2048,9 +2046,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
                 return io_submit_fail_init(sqe, req, ret);
         /* don't need @sqe from now on */
-        trace_io_uring_submit_sqe(ctx, req, req->cqe.user_data, req->opcode,
-                                  req->flags, true,
-                                  ctx->flags & IORING_SETUP_SQPOLL);
+        trace_io_uring_submit_sqe(req, true);
         /*
          * If we already have a head request, queue this one for async
@@ -2064,7 +2060,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
         if (unlikely(ret))
                 return io_submit_fail_init(sqe, req, ret);
-        trace_io_uring_link(ctx, req, link->head);
+        trace_io_uring_link(req, link->head);
         link->last->link = req;
         link->last = req;

io_uring/poll.c

@@ -288,7 +288,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask,
         else
                 req->io_task_work.func = io_apoll_task_func;
-        trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
+        trace_io_uring_task_add(req, mask);
         io_req_task_work_add(req);
 }
@@ -558,8 +558,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
         if (ret || ipt.error)
                 return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
-        trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode,
-                                mask, apoll->poll.events);
+        trace_io_uring_poll_arm(req, mask, apoll->poll.events);
         return IO_APOLL_OK;
 }

io_uring/timeout.c

@@ -115,8 +115,7 @@ static void io_fail_links(struct io_kiocb *req)
                 nxt = link->link;
                 link->link = NULL;
-                trace_io_uring_fail_link(req->ctx, req, req->cqe.user_data,
-                                         req->opcode, link);
+                trace_io_uring_fail_link(req, link);
                 if (ignore_cqes)
                         link->flags |= REQ_F_CQE_SKIP;