mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-26 21:54:11 +08:00
1e8352784a
2.6.25-rc1 percpu changes broke CONFIG_DEBUG_PREEMPT's per_cpu checking on several architectures. On s390, sparc64 and x86 it's been weakened to not checking at all; whereas on powerpc64 it's become too strict, issuing warnings from __raw_get_cpu_var in io_schedule and init_timer for example. Fix this by weakening powerpc's __my_cpu_offset to use the non-checking local_paca instead of get_paca (which itself contains such a check); and strengthening the generic my_cpu_offset to go the old slow way via smp_processor_id when CONFIG_DEBUG_PREEMPT (debug_smp_processor_id is where all the knowledge of what's correct when lives). Signed-off-by: Hugh Dickins <hugh@veritas.com> Reviewed-by: Mike Travis <travis@sgi.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
25 lines
559 B
C
25 lines
559 B
C
#ifndef _ASM_POWERPC_PERCPU_H_
#define _ASM_POWERPC_PERCPU_H_

#ifdef __powerpc64__
#include <linux/compiler.h>

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the paca. Based on the x86-64 implementation.
 */

#ifdef CONFIG_SMP

#include <asm/paca.h>

/* Per-cpu data offset for an arbitrary CPU, read from that CPU's paca. */
#define __per_cpu_offset(cpu) (paca[cpu].data_offset)
/*
 * Current CPU's offset.  Deliberately uses local_paca rather than
 * get_paca(): get_paca() contains a CONFIG_DEBUG_PREEMPT preemption
 * check, which is too strict here -- the generic my_cpu_offset path
 * (via debug_smp_processor_id) is where that checking belongs.
 */
#define __my_cpu_offset local_paca->data_offset
#define per_cpu_offset(x) (__per_cpu_offset(x))

#endif /* CONFIG_SMP */
#endif /* __powerpc64__ */

/* Pick up the generic definitions (and the UP/32-bit fallbacks). */
#include <asm-generic/percpu.h>

#endif /* _ASM_POWERPC_PERCPU_H_ */