d84b31313e
When a CPU runs in full dynticks mode, a 1Hz tick remains in order to
keep the scheduler stats alive. However this residual tick is a burden
for bare metal tasks that can't stand any interruption at all, or want
to minimize them.

The usual boot parameters "nohz_full=" or "isolcpus=nohz" will now
outsource these scheduler ticks to the global workqueue so that a
housekeeping CPU handles them remotely. The sched_class::task_tick()
implementations have been audited and look safe to be called remotely,
as the target runqueue and its current task are passed as parameters
and don't seem to be accessed locally.

Note that in the case of using isolcpus, it's still up to the user to
affine the global workqueues to the housekeeping CPUs through
/sys/devices/virtual/workqueue/cpumask or domains isolation
"isolcpus=nohz,domain".

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Link: http://lkml.kernel.org/r/1519186649-3242-6-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
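For example (a sketch, assuming CPUs 1-7 are the isolated CPUs and
CPU 0 is left as the housekeeping CPU), the boot parameter and the
workqueue affinity mentioned above could be set up as:

	# kernel command line: isolate CPUs 1-7 from the tick and domains
	isolcpus=nohz,domain,1-7

	# after boot: pin unbound workqueues (and with them the offloaded
	# 1Hz ticks) to housekeeping CPU 0; the cpumask is a hex bitmask
	echo 1 > /sys/devices/virtual/workqueue/cpumask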
147 lines
3.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
#include "sched.h"

/*
 * stop-task scheduling class.
 *
 * The stop task is the highest priority task in the system; it preempts
 * everything and will be preempted by nothing.
 *
 * See kernel/stop_machine.c
 */
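
/*
 * A minimal sketch (not part of the original file) of how work reaches
 * the stop task: callers go through the cpu_stop API declared in
 * <linux/stop_machine.h>, here with a hypothetical callback my_stop_fn
 * that runs on the target CPU with no other task able to preempt it:
 *
 *	static int my_stop_fn(void *arg)
 *	{
 *		return 0;	// executes in stop-task context
 *	}
 *
 *	stop_one_cpu(cpu, my_stop_fn, NULL);
 */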
#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */

static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	struct task_struct *stop = rq->stop;

	if (!stop || !task_on_rq_queued(stop))
		return NULL;

	put_prev_task(rq, prev);

	stop->se.exec_start = rq_clock_task(rq);

	return stop;
}

static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	add_nr_running(rq, 1);
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	sub_nr_running(rq, 1);
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless. */
}

static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	/* Account the CPU time consumed since exec_start was last set. */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
			max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cgroup_account_cputime(curr, delta_exec);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}
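
/*
 * A sketch (assuming the tick offload machinery in kernel/sched/core.c
 * that the NOTE above refers to) of the remote call path: a housekeeping
 * CPU's workqueue runs the offloaded 1Hz tick for an isolated CPU
 * roughly as
 *
 *	struct rq *rq = cpu_rq(remote_cpu);
 *	struct rq_flags rf;
 *
 *	rq_lock_irq(rq, &rf);
 *	update_rq_clock(rq);
 *	rq->curr->sched_class->task_tick(rq, rq->curr, 0);
 *	rq_unlock_irq(rq, &rf);
 *
 * which is why task_tick() implementations must only touch state
 * reachable from the @rq and @curr arguments.
 */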
static void set_curr_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	stop->se.exec_start = rq_clock_task(rq);
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how!? what priority? */
}

static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0;
}

static void update_curr_stop(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
const struct sched_class stop_sched_class = {
	.next			= &dl_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
	.update_curr		= update_curr_stop,
};
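
/*
 * Note (assuming the hardwired class chain of this kernel version): the
 * scheduler walks scheduling classes through ->next, so .next set to
 * &dl_sched_class above places this class ahead of all others, giving
 * the effective priority order
 *
 *	stop > dl > rt > fair > idle
 */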