sched: Introduce task_is_running()
Replace a bunch of 'p->state == TASK_RUNNING' with a new helper: task_is_running(p).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210611082838.222401495@infradead.org
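The helper itself is added in the scheduler-header hunk further down: task_is_running(p) expands to (READ_ONCE((p)->state) == TASK_RUNNING). As a quick illustration of the conversion pattern, here is a minimal, self-contained userspace sketch; struct task_struct, READ_ONCE() and get_wchan_sketch() are simplified stand-ins for illustration, not the kernel's own definitions.

#include <stdio.h>

#define TASK_RUNNING	0x0000	/* same value the kernel uses: 0 */

/* Simplified stand-in for the kernel's READ_ONCE(): force a single volatile load. */
#define READ_ONCE(x)	(*(const volatile typeof(x) *)&(x))

struct task_struct { long state; };	/* stand-in with just the field we need */

/* The helper this commit introduces. */
#define task_is_running(task)	(READ_ONCE((task)->state) == TASK_RUNNING)

/*
 * Old style:  if (!p || p == cur || p->state == TASK_RUNNING) ...
 * New style:  if (!p || p == cur || task_is_running(p)) ...
 */
static unsigned long get_wchan_sketch(struct task_struct *p, struct task_struct *cur)
{
	if (!p || p == cur || task_is_running(p))
		return 0;
	return 0x1234;	/* placeholder: a real get_wchan() would walk the stack here */
}

int main(void)
{
	struct task_struct self    = { .state = TASK_RUNNING };
	struct task_struct blocked = { .state = 0x0001 };	/* e.g. TASK_INTERRUPTIBLE */

	/* prints "0 4660": running/current tasks yield 0, a blocked task is inspected */
	printf("%lu %lu\n", get_wchan_sketch(&self, &self),
	       get_wchan_sketch(&blocked, &self));
	return 0;
}
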
commit b03fbd4ff2 (parent 37aadc687a)
@@ -380,7 +380,7 @@ get_wchan(struct task_struct *p)
 {
 	unsigned long schedule_frame;
 	unsigned long pc;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;
 	/*
 	 * This one depends on the frame size of schedule(). Do a

@@ -83,7 +83,7 @@ seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
 	 * is safe-kept and BLINK at a well known location in there
 	 */

-	if (tsk->state == TASK_RUNNING)
+	if (task_is_running(tsk))
 		return -1;

 	frame_info->task = tsk;

@@ -288,7 +288,7 @@ unsigned long get_wchan(struct task_struct *p)
 	struct stackframe frame;
 	unsigned long stack_page;
 	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	frame.fp = thread_saved_fp(p);

@@ -598,7 +598,7 @@ unsigned long get_wchan(struct task_struct *p)
 	struct stackframe frame;
 	unsigned long stack_page, ret = 0;
 	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	stack_page = (unsigned long)try_get_task_stack(p);

@@ -115,7 +115,7 @@ unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;

-	if (likely(task && task != current && task->state != TASK_RUNNING))
+	if (likely(task && task != current && !task_is_running(task)))
 		walk_stackframe(task, NULL, save_wchan, &pc);
 	return pc;
 }

@@ -134,7 +134,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_page;
 	int count = 0;

-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	stack_page = (unsigned long)p;

@@ -135,7 +135,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long fp, pc;
 	unsigned long stack_page;
 	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	stack_page = (unsigned long)task_stack_page(p);

@@ -529,7 +529,7 @@ get_wchan (struct task_struct *p)
 	unsigned long ip;
 	int count = 0;

-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	/*

@@ -542,7 +542,7 @@ get_wchan (struct task_struct *p)
 	 */
 	unw_init_from_blocked_task(&info, p);
 	do {
-		if (p->state == TASK_RUNNING)
+		if (task_is_running(p))
 			return 0;
 		if (unw_unwind(&info) < 0)
 			return 0;

@@ -268,7 +268,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long fp, pc;
 	unsigned long stack_page;
 	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	stack_page = (unsigned long)task_stack_page(p);

@@ -662,7 +662,7 @@ unsigned long get_wchan(struct task_struct *task)
 	unsigned long ra = 0;
 #endif

-	if (!task || task == current || task->state == TASK_RUNNING)
+	if (!task || task == current || task_is_running(task))
 		goto out;
 	if (!task_stack_page(task))
 		goto out;

@@ -239,7 +239,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_start, stack_end;
 	int count = 0;

-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	if (IS_ENABLED(CONFIG_FRAME_POINTER)) {

@@ -223,7 +223,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_page;
 	int count = 0;

-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	stack_page = (unsigned long)p;

@@ -249,7 +249,7 @@ get_wchan(struct task_struct *p)
 	unsigned long ip;
 	int count = 0;

-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	/*

@@ -260,7 +260,7 @@ get_wchan(struct task_struct *p)
 	do {
 		if (unwind_once(&info) < 0)
 			return 0;
-		if (p->state == TASK_RUNNING)
+		if (task_is_running(p))
 			return 0;
 		ip = info.ip;
 		if (!in_sched_functions(ip))

@@ -2084,7 +2084,7 @@ static unsigned long __get_wchan(struct task_struct *p)
 	unsigned long ip, sp;
 	int count = 0;

-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	sp = p->thread.ksp;

@@ -2094,7 +2094,7 @@ static unsigned long __get_wchan(struct task_struct *p)
 	do {
 		sp = *(unsigned long *)sp;
 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
-		    p->state == TASK_RUNNING)
+		    task_is_running(p))
 			return 0;
 		if (count > 0) {
 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];

@@ -132,7 +132,7 @@ unsigned long get_wchan(struct task_struct *task)
 {
 	unsigned long pc = 0;

-	if (likely(task && task != current && task->state != TASK_RUNNING))
+	if (likely(task && task != current && !task_is_running(task)))
 		walk_stackframe(task, NULL, save_wchan, &pc);
 	return pc;
 }

@@ -180,7 +180,7 @@ unsigned long get_wchan(struct task_struct *p)
 	struct unwind_state state;
 	unsigned long ip = 0;

-	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
+	if (!p || p == current || task_is_running(p) || !task_stack_page(p))
 		return 0;

 	if (!try_get_task_stack(p))

@@ -702,7 +702,7 @@ static void pfault_interrupt(struct ext_code ext_code,
 			 * interrupt since it must be a leftover of a PFAULT
 			 * CANCEL operation which didn't remove all pending
 			 * completion interrupts. */
-			if (tsk->state == TASK_RUNNING)
+			if (task_is_running(tsk))
 				tsk->thread.pfault_wait = -1;
 		}
 	} else {

@@ -186,7 +186,7 @@ unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long pc;

-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	/*

@@ -376,8 +376,7 @@ unsigned long get_wchan(struct task_struct *task)
 	struct reg_window32 *rw;
 	int count = 0;

-	if (!task || task == current ||
-	    task->state == TASK_RUNNING)
+	if (!task || task == current || task_is_running(task))
 		goto out;

 	fp = task_thread_info(task)->ksp + bias;

@@ -674,8 +674,7 @@ unsigned long get_wchan(struct task_struct *task)
 	unsigned long ret = 0;
 	int count = 0;

-	if (!task || task == current ||
-	    task->state == TASK_RUNNING)
+	if (!task || task == current || task_is_running(task))
 		goto out;

 	tp = task_thread_info(task);

@@ -369,7 +369,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_page, sp, ip;
 	bool seen_sched = 0;

-	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
+	if ((p == NULL) || (p == current) || task_is_running(p))
 		return 0;

 	stack_page = (unsigned long) task_stack_page(p);

@@ -931,7 +931,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
 	int count = 0;

-	if (p == current || p->state == TASK_RUNNING)
+	if (p == current || task_is_running(p))
 		return 0;

 	if (!try_get_task_stack(p))

@@ -975,7 +975,7 @@ unsigned long get_wchan(struct task_struct *p)
 			goto out;
 		}
 		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
-	} while (count++ < 16 && p->state != TASK_RUNNING);
+	} while (count++ < 16 && !task_is_running(p));

 out:
 	put_task_stack(p);

@@ -304,7 +304,7 @@ unsigned long get_wchan(struct task_struct *p)
 	unsigned long stack_page = (unsigned long) task_stack_page(p);
 	int count = 0;

-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

 	sp = p->thread.sp;

@@ -3926,7 +3926,7 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 		if (signal_pending_state(state, current))
 			__set_current_state(TASK_RUNNING);

-		if (current->state == TASK_RUNNING)
+		if (task_is_running(current))
 			return 1;
 		if (ret < 0 || !spin)
 			break;

@@ -113,6 +113,8 @@ struct task_group;
 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 					 TASK_PARKED)

+#define task_is_running(task) (READ_ONCE((task)->state) == TASK_RUNNING)
+
 #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)

 #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)

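Note that, unlike the open-coded 'p->state == TASK_RUNNING' comparisons it replaces, the new macro loads ->state through READ_ONCE(), so a lockless reader gets a single, non-torn load of the field: task_is_running(p) simply expands to (READ_ONCE((p)->state) == TASK_RUNNING).
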
@@ -460,7 +460,7 @@ static void set_other_info_task_blocking(unsigned long *flags,
 	 * We may be instrumenting a code-path where current->state is already
 	 * something other than TASK_RUNNING.
 	 */
-	const bool is_running = current->state == TASK_RUNNING;
+	const bool is_running = task_is_running(current);
 	/*
 	 * To avoid deadlock in case we are in an interrupt here and this is a
 	 * race with a task on the same CPU (KCSAN_INTERRUPT_WATCHER), provide a

@@ -760,7 +760,7 @@ static void lockdep_print_held_locks(struct task_struct *p)
 	 * It's not reliable to print a task's held locks if it's not sleeping
 	 * and it's not the current task.
 	 */
-	if (p->state == TASK_RUNNING && p != current)
+	if (p != current && task_is_running(p))
 		return;
 	for (i = 0; i < depth; i++) {
 		printk(" #%d: ", i);

@@ -2768,7 +2768,7 @@ EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
 #ifdef CONFIG_SMP
 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
 {
-	return tsp && tsp->state == TASK_RUNNING && !tsp->on_cpu ? "!" : "";
+	return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
 }
 #else // #ifdef CONFIG_SMP
 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)

@@ -5974,7 +5974,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 {
 	unsigned int task_flags;

-	if (!tsk->state)
+	if (task_is_running(tsk))
 		return;

 	task_flags = tsk->flags;

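The conversion in sched_submit_work() above is not a polarity change: TASK_RUNNING is 0, so the old 'if (!tsk->state)' and the new 'if (task_is_running(tsk))' test the same condition; the helper only makes the intent explicit and adds the READ_ONCE() load.
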
@@ -7949,7 +7949,7 @@ again:
 	if (curr->sched_class != p->sched_class)
 		goto out_unlock;

-	if (task_running(p_rq, p) || p->state)
+	if (task_running(p_rq, p) || !task_is_running(p))
 		goto out_unlock;

 	yielded = curr->sched_class->yield_to_task(rq, p);

@@ -8152,7 +8152,7 @@ void sched_show_task(struct task_struct *p)

 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));

-	if (p->state == TASK_RUNNING)
+	if (task_is_running(p))
 		pr_cont(" running task ");
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	free = stack_not_used(p);

@@ -217,7 +217,7 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t)

 	rq_sched_info_depart(rq, delta);

-	if (t->state == TASK_RUNNING)
+	if (task_is_running(t))
 		sched_info_enqueue(rq, t);
 }

@@ -4719,7 +4719,7 @@ void kdb_send_sig(struct task_struct *t, int sig)
 	}
 	new_t = kdb_prev_t != t;
 	kdb_prev_t = t;
-	if (t->state != TASK_RUNNING && new_t) {
+	if (!task_is_running(t) && new_t) {
 		spin_unlock(&t->sighand->siglock);
 		kdb_printf("Process is not RUNNING, sending a signal from "
 			   "kdb risks deadlock\n"

@@ -92,8 +92,7 @@ static bool ksoftirqd_running(unsigned long pending)

 	if (pending & SOFTIRQ_NOW_MASK)
 		return false;
-	return tsk && (tsk->state == TASK_RUNNING) &&
-		!__kthread_should_park(tsk);
+	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
 }

 #ifdef CONFIG_TRACE_IRQFLAGS

@@ -1955,7 +1955,7 @@ static inline bool is_via_compact_memory(int order)

 static bool kswapd_is_running(pg_data_t *pgdat)
 {
-	return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
+	return pgdat->kswapd && task_is_running(pgdat->kswapd);
 }

 /*