Mirror of https://mirrors.bfsu.edu.cn/git/linux.git
Synced 2024-12-15 06:55:13 +08:00
5d69eca542
All classes use sched_entity::exec_start to track runtime and have copies of
the exact same code around to compute runtime. Collapse all that.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Phil Auld <pauld@redhat.com>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Link: https://lkml.kernel.org/r/54d148a144f26d9559698c4dd82d8859038a7380.1699095159.git.bristot@kernel.org
131 lines
2.8 KiB
C
// SPDX-License-Identifier: GPL-2.0

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system, it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */

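/*
 * Context for the class below: the stop class exists to run the
 * stop_machine/cpu_stop callbacks. A minimal usage sketch follows,
 * assuming only the public <linux/stop_machine.h> API; it is
 * illustrative and not part of this file.
 */
#if 0
#include <linux/stop_machine.h>

/* Runs on one CPU while all other online CPUs spin with interrupts off. */
static int quiesce_fn(void *arg)
{
	/* Safe point for rare, machine-wide state changes. */
	return 0;
}

static void example_caller(void)
{
	/* Dispatched through the per-CPU stop tasks defined below. */
	stop_machine(quiesce_fn, NULL, NULL);
}
#endif
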
#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}

static int
balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return sched_stop_runnable(rq);
}
#endif /* CONFIG_SMP */

static void
wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
{
	stop->se.exec_start = rq_clock_task(rq);
}

static struct task_struct *pick_task_stop(struct rq *rq)
{
	if (!sched_stop_runnable(rq))
		return NULL;

	return rq->stop;
}

static struct task_struct *pick_next_task_stop(struct rq *rq)
{
	struct task_struct *p = pick_task_stop(rq);

	if (p)
		set_next_task_stop(rq, p, true);

	return p;
}

static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	update_curr_common(rq);
}

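/*
 * Per the commit message above, update_curr_common() is the shared
 * runtime-accounting helper that replaced per-class copies. A rough
 * sketch of its logic, inferred from the commit description; the
 * actual body lives elsewhere in kernel/sched/ and may differ.
 */
#if 0
s64 update_curr_common(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 now = rq_clock_task(rq);
	s64 delta_exec = now - curr->se.exec_start;

	if (unlikely(delta_exec <= 0))
		return delta_exec;

	curr->se.exec_start = now;			/* restart the window */
	curr->se.sum_exec_runtime += delta_exec;	/* charge the task */

	return delta_exec;
}
#endif
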
/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

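/*
 * The NOTE above is illustrated by the hypothetical handler below
 * (not code from this file): under NO_HZ_FULL tick offload it may run
 * on a housekeeping CPU on behalf of @rq's CPU, so this_rq() and
 * 'current' are off limits; only @rq and @curr may be dereferenced.
 */
#if 0
static void task_tick_example(struct rq *rq, struct task_struct *curr, int queued)
{
	update_curr_common(rq);	/* account via @rq, never this_rq() */
}
#endif
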
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}

static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
DEFINE_SCHED_CLASS(stop) = {

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.wakeup_preempt		= wakeup_preempt_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,
	.set_next_task		= set_next_task_stop,

#ifdef CONFIG_SMP
	.balance		= balance_stop,
	.pick_task		= pick_task_stop,
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
};
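/*
 * For reference, DEFINE_SCHED_CLASS places each class instance in its
 * own linker section so the core can walk classes in priority order,
 * with the stop class highest. Roughly (adapted from
 * kernel/sched/sched.h; details vary by kernel version):
 */
#if 0
#define DEFINE_SCHED_CLASS(name)					\
const struct sched_class name##_sched_class				\
	__aligned(__alignof__(struct sched_class))			\
	__section("__" #name "_sched_class")
#endif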