kthread: add kthread_work tracepoints

While migrating some code from a workqueue to kthread_worker, I found
that I missed the execute_start/end tracepoints.  So add similar
tracepoints for kthread_work.  And, for completeness, add a queue_work
tracepoint (although this one differs slightly from the matching
workqueue tracepoint).
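
As an editorial aside (not part of the patch), here is a minimal sketch
of a kthread_worker user that would hit all three new events; the
example_* names and the "example-kworker" thread name are invented for
illustration:

#include <linux/err.h>
#include <linux/kthread.h>

static void example_work_fn(struct kthread_work *work)
{
	/* body runs between execute_start and execute_end */
}

static DEFINE_KTHREAD_WORK(example_work, example_work_fn);

static int example_run(void)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker(0, "example-kworker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	/* fires sched_kthread_work_queue_work */
	kthread_queue_work(worker, &example_work);

	/* execute_start/execute_end fire around example_work_fn() */
	kthread_flush_work(&example_work);

	kthread_destroy_worker(worker);
	return 0;
}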

Link: https://lkml.kernel.org/r/20201010180323.126634-1-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
Cc: Rob Clark <robdclark@chromium.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Cc: Phil Auld <pauld@redhat.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Thara Gopinath <thara.gopinath@linaro.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Vincent Donnefort <vincent.donnefort@arm.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Ilias Stamatis <stamatis.iliass@gmail.com>
Cc: Liang Chen <cl@rock-chips.com>
Cc: Ben Dooks <ben.dooks@codethink.co.uk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "J. Bruce Fields" <bfields@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

include/trace/events/sched.h

@@ -5,6 +5,7 @@
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
@@ -51,6 +52,89 @@ TRACE_EVENT(sched_kthread_stop_ret,
	TP_printk("ret=%d", __entry->ret)
);

/**
 * sched_kthread_work_queue_work - called when a work gets queued
 * @worker:	pointer to the kthread_worker
 * @work:	pointer to struct kthread_work
 *
 * This event occurs when a work is queued immediately or once a
 * delayed work is actually queued (i.e. once the delay has been
 * reached).
 */
TRACE_EVENT(sched_kthread_work_queue_work,

	TP_PROTO(struct kthread_worker *worker,
		 struct kthread_work *work),

	TP_ARGS(worker, work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
		__field( void *, worker)
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
		__entry->worker = worker;
	),

	TP_printk("work struct=%p function=%ps worker=%p",
		  __entry->work, __entry->function, __entry->worker)
);

/**
 * sched_kthread_work_execute_start - called immediately before the work callback
 * @work:	pointer to struct kthread_work
 *
 * Allows to track kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_start,

	TP_PROTO(struct kthread_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = work->func;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/**
 * sched_kthread_work_execute_end - called immediately after the work callback
 * @work:	pointer to struct kthread_work
 * @function:	pointer to the work function
 *
 * Allows to track kthread work execution.
 */
TRACE_EVENT(sched_kthread_work_execute_end,

	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),

	TP_ARGS(work, function),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
	),

	TP_fast_assign(
		__entry->work = work;
		__entry->function = function;
	),

	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);

/*
 * Tracepoint for waking up a task:
 */
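
Beyond consuming these events through tracefs, kernel code can attach a
probe to a TRACE_EVENT directly. A sketch, assuming the caller can reach
the tracepoint (built-in code, or with the tracepoint exported via
EXPORT_TRACEPOINT_SYMBOL_GPL); probe_kwork_start and the attach/detach
helpers are invented names:

#include <linux/printk.h>
#include <trace/events/sched.h>

/* a probe receives the registration cookie first, then the TP_PROTO args */
static void probe_kwork_start(void *data, struct kthread_work *work)
{
	pr_info("kthread_work %ps starting\n", work->func);
}

static int attach_probe(void)
{
	return register_trace_sched_kthread_work_execute_start(probe_kwork_start,
							       NULL);
}

static void detach_probe(void)
{
	unregister_trace_sched_kthread_work_execute_start(probe_kwork_start,
							  NULL);
	/* wait for in-flight probes to finish before freeing anything */
	tracepoint_synchronize_unregister();
}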

kernel/kthread.c

@@ -704,8 +704,15 @@ repeat:
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		kthread_work_func_t func = work->func;
		__set_current_state(TASK_RUNNING);
		trace_sched_kthread_work_execute_start(work);
		work->func(work);
		/*
		 * Avoid dereferencing work after this point. The trace
		 * event only cares about the address.
		 */
		trace_sched_kthread_work_execute_end(work, func);
	} else if (!freezing(current))
		schedule();
@@ -834,6 +841,8 @@ static void kthread_insert_work(struct kthread_worker *worker,
{
	kthread_insert_work_sanity_check(worker, work);

	trace_sched_kthread_work_queue_work(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
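
Worth noting: because the queue_work event is emitted from
kthread_insert_work(), a delayed work only logs it once its timer
expires and the work actually lands on the worker's list, which is what
the tracepoint docstring means by "once the delay has been reached". A
sketch of the delayed case, again with invented example_* names:

#include <linux/kthread.h>

static void example_dwork_fn(struct kthread_work *work)
{
	/* delayed body */
}

static DEFINE_KTHREAD_DELAYED_WORK(example_dwork, example_dwork_fn);

static void example_queue_delayed(struct kthread_worker *worker)
{
	/*
	 * Returns immediately; sched_kthread_work_queue_work fires
	 * about one second from now, when the timer callback hands
	 * the work to kthread_insert_work().
	 */
	kthread_queue_delayed_work(worker, &example_dwork, HZ);
}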