wrap access to thread_info
Recently a few direct accesses to the thread_info in the task structure snuck back, so this wraps them with the appropriate wrapper.

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e61a1c1c4f
commit c9f4f06d31
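For context, the two accessors the patch converts call sites to both wrap the task's stack/thread_info storage. A minimal sketch of their likely definitions, assuming the contemporaneous include/linux/sched.h in which task_struct's thread_info pointer had just been renamed to "stack" (the asm-offsets hunks below reflect that same rename); this sketch is illustrative and not part of this commit's diff:

/* Illustrative sketch of the wrappers, following <linux/sched.h> of
 * this era: task_stack_page() yields the base of the task's kernel
 * stack, task_thread_info() types the same pointer as a
 * struct thread_info *.
 */
#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)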
@@ -330,13 +330,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 {
 	struct pt_regs *childregs;
 
-	childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)p->thread_info)) - 1;
+	childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)task_stack_page(p))) - 1;
 	*childregs = *regs;
 
 	if (user_mode(regs))
 		childregs->sp = usp;
 	else
-		childregs->sp = (unsigned long)p->thread_info + THREAD_SIZE;
+		childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 
 	childregs->r12 = 0; /* Set return value for child */
 
@@ -403,7 +403,7 @@ unsigned long get_wchan(struct task_struct *p)
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
-	stack_page = (unsigned long)p->thread_info;
+	stack_page = (unsigned long)task_stack_page(p);
 	BUG_ON(!stack_page);
 
 	/*
@@ -24,7 +24,7 @@
 
 static struct pt_regs *get_user_regs(struct task_struct *tsk)
 {
-	return (struct pt_regs *)((unsigned long) tsk->thread_info +
+	return (struct pt_regs *)((unsigned long)task_stack_page(tsk) +
 				  THREAD_SIZE - sizeof(struct pt_regs));
 }
 
@@ -46,7 +46,7 @@ int main(void)
 	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
 	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
 	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
-	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
 	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
 	DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
@@ -305,7 +305,7 @@ void show_registers(struct pt_regs *regs)
 		regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
 	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
 		TASK_COMM_LEN, current->comm, current->pid,
-		current_thread_info(), current, current->thread_info);
+		current_thread_info(), current, task_thread_info(current));
 	/*
 	 * When in-kernel, we also print out the stack and code at the
 	 * time of the fault..
@@ -31,7 +31,7 @@ int main(void)
 	DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
 	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
 	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
-	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+	DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
 	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
 	DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
 
@@ -560,7 +560,7 @@ void smtc_boot_secondary(int cpu, struct task_struct *idle)
 	write_tc_gpr_sp(__KSTK_TOS(idle));
 
 	/* global pointer */
-	write_tc_gpr_gp((unsigned long)idle->thread_info);
+	write_tc_gpr_gp((unsigned long)task_thread_info(idle));
 
 	smtc_status |= SMTC_MTC_ACTIVE;
 	write_tc_c0_tchalt(0);
@@ -32,7 +32,7 @@ atomic_t irq_err_count;
  */
 static inline void stack_overflow_check(struct pt_regs *regs)
 {
-	u64 curbase = (u64) current->thread_info;
+	u64 curbase = (u64)task_stack_page(current);
 	static unsigned long warned = -60*HZ;
 
 	if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
@@ -172,7 +172,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
 #define TS_POLLING		0x0002	/* True if in idle loop and not sleeping */
 
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
 
@@ -110,6 +110,6 @@ struct thread_info {
 
 #define TS_POLLING	1	/* true if in idle loop and not sleeping */
 
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
 #endif /* _ASM_IA64_THREAD_INFO_H */
@@ -55,7 +55,7 @@ do { \
 	if (cpu_has_dsp)					\
 		__save_dsp(prev);				\
 	next->thread.emulated_fp = 0;				\
-	(last) = resume(prev, next, next->thread_info);		\
+	(last) = resume(prev, next, task_thread_info(next));	\
 	if (cpu_has_dsp)					\
 		__restore_dsp(current);				\
 } while(0)
@@ -152,7 +152,7 @@ static __inline__ void __user *compat_alloc_user_space(long len)
 
 static inline int __is_compat_task(struct task_struct *t)
 {
-	return test_ti_thread_flag(t->thread_info, TIF_32BIT);
+	return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
 }
 
 static inline int is_compat_task(void)
@@ -162,7 +162,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define TS_COMPAT		0x0002	/* 32bit syscall active */
 #define TS_POLLING		0x0004	/* true if in idle loop and not sleeping */
 
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
 #endif /* __KERNEL__ */
 
@@ -133,7 +133,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 
 	debug_mutex_lock_common(lock, &waiter);
 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	debug_mutex_add_waiter(lock, &waiter, task->thread_info);
+	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -159,7 +159,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 		 */
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 			     signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task->thread_info);
+			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, _RET_IP_);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
@@ -175,8 +175,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	}
 
 	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, task->thread_info);
-	debug_mutex_set_owner(lock, task->thread_info);
+	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+	debug_mutex_set_owner(lock, task_thread_info(task));
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))