tracing: Disable snapshot buffer when stopping instance tracers

commit b538bf7d0e upstream.

It used to be that only the top level instance had a snapshot buffer (for
latency tracers like wakeup and irqsoff), so stopping a tracer in an
instance would not disable the snapshot buffer. This could have
unintended consequences if the irqsoff tracer is enabled.

Consolidate tracing_start/stop() with tracing_start/stop_tr() so that
all instances behave the same. The tracing_start/stop() functions will
just call their respective tracing_start/stop_tr() with the global_trace
array passed in.
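
The resulting shape of the exported entry points (taken from the diff
below) is just a pair of thin wrappers around the per-instance helpers:

	void tracing_start(void)
	{
		return tracing_start_tr(&global_trace);
	}

	void tracing_stop(void)
	{
		return tracing_stop_tr(&global_trace);
	}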

Link: https://lkml.kernel.org/r/20231205220011.041220035@goodmis.org

Cc: stable@vger.kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Fixes: 6d9b3fa5e7 ("tracing: Move tracing_max_latency into trace_array")
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2359,49 +2359,6 @@ int is_tracing_stopped(void)
 	return global_trace.stop_count;
 }
 
-/**
- * tracing_start - quick start of the tracer
- *
- * If tracing is enabled but was stopped by tracing_stop,
- * this will start the tracer back up.
- */
-void tracing_start(void)
-{
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	if (tracing_disabled)
-		return;
-
-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-	if (--global_trace.stop_count) {
-		if (global_trace.stop_count < 0) {
-			/* Someone screwed up their debugging */
-			WARN_ON_ONCE(1);
-			global_trace.stop_count = 0;
-		}
-		goto out;
-	}
-
-	/* Prevent the buffers from switching */
-	arch_spin_lock(&global_trace.max_lock);
-
-	buffer = global_trace.array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_enable(buffer);
-
-#ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = global_trace.max_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_enable(buffer);
-#endif
-
-	arch_spin_unlock(&global_trace.max_lock);
-
- out:
-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
 static void tracing_start_tr(struct trace_array *tr)
 {
 	struct trace_buffer *buffer;
@@ -2410,25 +2367,70 @@ static void tracing_start_tr(struct trace_array *tr)
 	if (tracing_disabled)
 		return;
 
-	/* If global, we need to also start the max tracer */
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-		return tracing_start();
-
 	raw_spin_lock_irqsave(&tr->start_lock, flags);
 	if (--tr->stop_count) {
-		if (tr->stop_count < 0) {
+		if (WARN_ON_ONCE(tr->stop_count < 0)) {
 			/* Someone screwed up their debugging */
-			WARN_ON_ONCE(1);
 			tr->stop_count = 0;
 		}
 		goto out;
 	}
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&tr->max_lock);
+
 	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+	buffer = tr->max_buffer.buffer;
+	if (buffer)
+		ring_buffer_record_enable(buffer);
+#endif
+
+	arch_spin_unlock(&tr->max_lock);
+
  out:
 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
+
+/**
+ * tracing_start - quick start of the tracer
+ *
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
+ */
+void tracing_start(void)
+{
+	return tracing_start_tr(&global_trace);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
+{
+	struct trace_buffer *buffer;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+	if (tr->stop_count++)
+		goto out;
+
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&tr->max_lock);
+
+	buffer = tr->array_buffer.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+	buffer = tr->max_buffer.buffer;
+	if (buffer)
+		ring_buffer_record_disable(buffer);
+#endif
+
+	arch_spin_unlock(&tr->max_lock);
+
+ out:
+	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+}
@@ -2441,51 +2443,7 @@ static void tracing_start_tr(struct trace_array *tr)
  */
 void tracing_stop(void)
 {
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-	if (global_trace.stop_count++)
-		goto out;
-
-	/* Prevent the buffers from switching */
-	arch_spin_lock(&global_trace.max_lock);
-
-	buffer = global_trace.array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_disable(buffer);
-
-#ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = global_trace.max_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_disable(buffer);
-#endif
-
-	arch_spin_unlock(&global_trace.max_lock);
-
- out:
-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
-static void tracing_stop_tr(struct trace_array *tr)
-{
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	/* If global, we need to also stop the max tracer */
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-		return tracing_stop();
-
-	raw_spin_lock_irqsave(&tr->start_lock, flags);
-	if (tr->stop_count++)
-		goto out;
-
-	buffer = tr->array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_disable(buffer);
-
- out:
-	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+	return tracing_stop_tr(&global_trace);
 }
 
 static int trace_save_cmdline(struct task_struct *tsk)
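
Note the stop_count handling that the consolidated helpers keep:
tracing_stop_tr()/tracing_start_tr() nest like a refcount, so the ring
buffers are only re-enabled when the final start balances the first
stop. A minimal user-space sketch of just that counter logic
(illustrative only; not kernel code, and the stop()/start() names are
made up):

	#include <stdio.h>

	static int stop_count;

	static void stop(void)
	{
		if (stop_count++)
			return;			/* already stopped; just nest deeper */
		printf("recording disabled\n");	/* 0 -> 1 transition */
	}

	static void start(void)
	{
		if (--stop_count) {
			if (stop_count < 0)
				stop_count = 0;	/* unbalanced start(); the kernel warns and clamps */
			return;			/* still nested; stay stopped */
		}
		printf("recording enabled\n");	/* 1 -> 0 transition */
	}

	int main(void)
	{
		stop();		/* disables recording */
		stop();		/* nests; no output */
		start();	/* still stopped */
		start();	/* re-enables recording */
		return 0;
	}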