Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-02 00:24:12 +08:00)
d8ac897137
In preparation for adding diagnostic checks to catch missing calls to
update_rq_clock(), provide wrappers for (re)pinning and unpinning
rq->lock.

Because the pending diagnostic checks allow state to be maintained in
rq_flags across pin contexts, swap the 'struct pin_cookie' arguments
for 'struct rq_flags *'.

Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luca Abeni <luca.abeni@unitn.it>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Cc: Yuyang Du <yuyang.du@intel.com>
Link: http://lkml.kernel.org/r/20160921133813.31976-5-matt@codeblueprint.co.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
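For context, the (re)pin/unpin wrappers described above are not part of this file; a minimal sketch of their shape follows, assuming struct rq_flags carries the lockdep pin_cookie across pin contexts as the commit message describes. The names rq_pin_lock(), rq_unpin_lock() and rq_repin_lock() and the exact struct layout are reconstructed for illustration, not quoted from this page.

/*
 * Illustrative sketch of the rq->lock pin/unpin wrappers, built on the
 * existing lockdep_pin_lock()/lockdep_unpin_lock()/lockdep_repin_lock()
 * helpers. The pin cookie now travels inside rq_flags instead of being
 * passed around as a bare 'struct pin_cookie'.
 */
struct rq_flags {
	unsigned long flags;		/* saved IRQ flags for *_irqsave() callers */
	struct pin_cookie cookie;	/* lockdep pin state for rq->lock */
};

static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	rf->cookie = lockdep_pin_lock(&rq->lock);
}

static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_unpin_lock(&rq->lock, rf->cookie);
}

static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	lockdep_repin_lock(&rq->lock, rf->cookie);
}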
110 lines, 2.3 KiB, C
#include "sched.h"

/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched/fair.c)
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	put_prev_task(rq, prev);
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);

	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
	rq_last_tick_reset(rq);
}

static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};
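The struct rq_flags * argument that pick_next_task_idle() takes in this version is threaded down from the scheduler core so that the rq->lock pin state can follow the lock across the pick path. The following is only an illustration of that calling convention under the wrappers sketched earlier; the real __schedule() path does considerably more and does not call the idle class directly.

/* Hypothetical caller-side pattern; not verbatim scheduler code. */
static void pick_example(struct rq *rq, struct task_struct *prev)
{
	struct rq_flags rf;
	struct task_struct *next;

	raw_spin_lock_irq(&rq->lock);
	rq_pin_lock(rq, &rf);		/* pin rq->lock; cookie kept in rf */

	next = pick_next_task_idle(rq, prev, &rf);

	rq_unpin_lock(rq, &rf);		/* unpin before dropping the lock */
	raw_spin_unlock_irq(&rq->lock);
}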