arm64: use preempt_disable_notrace in _percpu_read/write
When debug preempt or the preempt tracer is enabled,
preempt_count_add/sub() can be traced by the function and function
graph tracers, and preempt_disable/enable() call
preempt_count_add/sub(), so within the ftrace subsystem we should use
preempt_disable/enable_notrace instead.
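For reference, a paraphrased sketch of the relevant definitions
(abridged from include/linux/preempt.h; the exact code varies with
config options):

	/*
	 * With CONFIG_DEBUG_PREEMPT or CONFIG_PREEMPT_TRACER,
	 * preempt_count_add/sub() are out-of-line functions that ftrace
	 * can hook, so preempt_disable() itself becomes traceable:
	 */
	#define preempt_disable() \
	do { \
		preempt_count_inc();	/* -> preempt_count_add(1), traceable */ \
		barrier(); \
	} while (0)

	/*
	 * The _notrace variant bumps the count via the inline
	 * __preempt_count_inc(), which ftrace never sees:
	 */
	#define preempt_disable_notrace() \
	do { \
		__preempt_count_inc();	/* inline, invisible to ftrace */ \
		barrier(); \
	} while (0)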
In commit 345ddcc882
("ftrace: Have set_ftrace_pid use the bitmap like events do"),
a call to this_cpu_read() was added to trace_graph_entry(). If
this_cpu_read() calls preempt_disable(), the graph tracer goes into a
recursive loop, even when tracing_on is disabled.
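Roughly, the loop looks like this (a simplified sketch of the call
chain, not the exact code paths):

	trace_graph_entry()
	  -> this_cpu_read(...)			/* _percpu_read() on arm64 */
	    -> preempt_disable()
	      -> preempt_count_add()		/* out of line, hence traceable */
	        -> function graph entry hook
	          -> trace_graph_entry()	/* recurses indefinitely */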
So this patch changes this_cpu_read() to use
preempt_disable/enable_notrace instead.
Since Yonghui Yang helped a lot to find the root cause of this
problem, also add his SOB.
Signed-off-by: Yonghui Yang <mark.yang@spreadtrum.com>
Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 872c63fbf9
commit 2b9743441a
@@ -199,19 +199,19 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 #define _percpu_read(pcp)						\
 ({									\
 	typeof(pcp) __retval;						\
-	preempt_disable();						\
+	preempt_disable_notrace();					\
 	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),	\
 					      sizeof(pcp));		\
-	preempt_enable();						\
+	preempt_enable_notrace();					\
 	__retval;							\
 })
 
 #define _percpu_write(pcp, val)						\
 do {									\
-	preempt_disable();						\
+	preempt_disable_notrace();					\
 	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),	\
 			sizeof(pcp));					\
-	preempt_enable();						\
+	preempt_enable_notrace();					\
 } while(0)								\
 
 #define _pcp_protect(operation, pcp, val)			\