tracing/perf: Add interrupt_context_level() helper
Now that there are three different places that open-code the same addition trick against preempt_count() using the NMI_MASK, HARDIRQ_MASK and SOFTIRQ_OFFSET macros, the logic deserves a helper function in the preempt.h header. Add the interrupt_context_level() helper and replace the three open-coded instances with it.

Link: https://lore.kernel.org/all/20211015142541.4badd8a9@gandalf.local.home/
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
This commit is contained in:
commit 91ebe8bcbf (parent 9b84fadc44)
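As a quick illustration of the "addition trick" the commit message refers to: each successive test ORs in one more mask, so the three boolean results form a monotone chain and their sum is the level of the innermost interrupt context. Below is a minimal, standalone userspace sketch; the FAKE_* constants are made-up single-bit stand-ins for the kernel's SOFTIRQ_OFFSET, HARDIRQ_MASK and NMI_MASK (which are multi-bit nesting fields in the real preempt_count() layout), not the real values.

/* Minimal userspace sketch of the addition trick used by
 * interrupt_context_level().  The FAKE_* values are invented for
 * illustration and do not match the kernel's real preempt_count() layout. */
#include <assert.h>

#define FAKE_SOFTIRQ_OFFSET	(1UL << 8)
#define FAKE_HARDIRQ_MASK	(1UL << 16)
#define FAKE_NMI_MASK		(1UL << 20)

static unsigned char fake_context_level(unsigned long pc)
{
	unsigned char level = 0;

	/* Each test covers its own mask plus every "more interrupting" one,
	 * so summing the booleans yields the innermost context, branch-free. */
	level += !!(pc & (FAKE_NMI_MASK));
	level += !!(pc & (FAKE_NMI_MASK | FAKE_HARDIRQ_MASK));
	level += !!(pc & (FAKE_NMI_MASK | FAKE_HARDIRQ_MASK | FAKE_SOFTIRQ_OFFSET));

	return level;
}

int main(void)
{
	assert(fake_context_level(0) == 0);			/* normal  */
	assert(fake_context_level(FAKE_SOFTIRQ_OFFSET) == 1);	/* softirq */
	assert(fake_context_level(FAKE_HARDIRQ_MASK) == 2);	/* hardirq */
	assert(fake_context_level(FAKE_NMI_MASK) == 3);		/* NMI     */
	/* Nesting resolves to the innermost context: a hardirq that fires
	 * while a softirq is running still reports level 2. */
	assert(fake_context_level(FAKE_SOFTIRQ_OFFSET | FAKE_HARDIRQ_MASK) == 2);
	return 0;
}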
include/linux/preempt.h
@@ -77,6 +77,27 @@
 /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
 #include <asm/preempt.h>
 
+/**
+ * interrupt_context_level - return interrupt context level
+ *
+ * Returns the current interrupt context level.
+ *  0 - normal context
+ *  1 - softirq context
+ *  2 - hardirq context
+ *  3 - NMI context
+ */
+static __always_inline unsigned char interrupt_context_level(void)
+{
+	unsigned long pc = preempt_count();
+	unsigned char level = 0;
+
+	level += !!(pc & (NMI_MASK));
+	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+	return level;
+}
+
 #define nmi_count()	(preempt_count() & NMI_MASK)
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
 #ifdef CONFIG_PREEMPT_RT
include/linux/trace_recursion.h
@@ -136,12 +136,7 @@ enum {
 
 static __always_inline int trace_get_context_bit(void)
 {
-	unsigned long pc = preempt_count();
-	unsigned char bit = 0;
-
-	bit += !!(pc & (NMI_MASK));
-	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
-	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+	unsigned char bit = interrupt_context_level();
 
 	return TRACE_CTX_NORMAL - bit;
 }
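Here trace_get_context_bit() converts the helper's scale (0 = normal ... 3 = NMI) into the tracing recursion context value, which is laid out the other way around, by subtracting from TRACE_CTX_NORMAL. A small sketch of that mapping, assuming an enum that puts NMI first and NORMAL last (as the tracing recursion code does); the names below are local illustrations, not a copy of the kernel header:

/* Sketch of the TRACE_CTX_NORMAL - level mapping, assuming an enum that puts
 * NMI first and NORMAL last. */
enum {
	CTX_NMI,	/* 0 */
	CTX_IRQ,	/* 1 */
	CTX_SOFTIRQ,	/* 2 */
	CTX_NORMAL,	/* 3 */
};

static int context_bit_from_level(unsigned char level)
{
	/* level comes from interrupt_context_level(): 0 = normal ... 3 = NMI */
	return CTX_NORMAL - level;
}

/*
 * level 0 -> CTX_NORMAL   level 1 -> CTX_SOFTIRQ
 * level 2 -> CTX_IRQ      level 3 -> CTX_NMI
 */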
kernel/events/internal.h
@@ -205,12 +205,7 @@ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
 
 static inline int get_recursion_context(int *recursion)
 {
-	unsigned int pc = preempt_count();
-	unsigned char rctx = 0;
-
-	rctx += !!(pc & (NMI_MASK));
-	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
-	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+	unsigned char rctx = interrupt_context_level();
 
 	if (recursion[rctx])
 		return -1;
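perf keeps one recursion counter per interrupt context, so the value returned by interrupt_context_level() can index that array directly. A simplified sketch of the guard pattern, assuming a plain 4-entry array; the real get_recursion_context()/put_recursion_context() pair also deals with per-CPU storage and compiler barriers, which are omitted here:

/* Simplified per-context recursion guard in the spirit of perf's
 * get_recursion_context(); per-CPU handling and barriers are left out. */
static int recursion[4];	/* indexed by level: normal, softirq, hardirq, NMI */

static int get_recursion_context_sketch(unsigned char level)
{
	if (recursion[level])
		return -1;	/* already inside a handler at this level */

	recursion[level]++;
	return level;		/* caller hands this back when it is done */
}

static void put_recursion_context_sketch(int rctx)
{
	recursion[rctx]--;
}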
kernel/trace/ring_buffer.c
@@ -3167,12 +3167,7 @@ static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned int val = cpu_buffer->current_context;
-	unsigned long pc = preempt_count();
-	int bit = 0;
-
-	bit += !!(pc & (NMI_MASK));
-	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
-	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+	int bit = interrupt_context_level();
 
 	bit = RB_CTX_NORMAL - bit;
 
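The ring buffer records which contexts are currently writing in cpu_buffer->current_context, so the bit computed above (RB_CTX_NORMAL - level) becomes a position in that bitmask. A simplified sketch of that recursion check, assuming a bare bitmask; the kernel's trace_recursive_lock() additionally handles nested ring-buffer users and a transition bit, which are not shown:

/* Simplified bitmask recursion check along the lines of the ring buffer's
 * trace_recursive_lock(); nesting and transition handling are omitted. */
struct fake_cpu_buffer {
	unsigned int current_context;	/* one bit per context level */
};

static int recursive_lock_sketch(struct fake_cpu_buffer *cpu_buffer, int bit)
{
	unsigned int val = cpu_buffer->current_context;

	if (val & (1U << bit))
		return -1;		/* this context is already writing */

	cpu_buffer->current_context = val | (1U << bit);
	return 0;
}

static void recursive_unlock_sketch(struct fake_cpu_buffer *cpu_buffer, int bit)
{
	cpu_buffer->current_context &= ~(1U << bit);
}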