sched/dl: Support schedstats for deadline sched class
After we make the struct sched_statistics and its helpers independent of the
fair sched class, we can easily use the schedstats facility for the deadline
sched class.

The schedstat usage in the DL sched class is similar to that in the fair
sched class, for example:

                    fair                          deadline
    enqueue         update_stats_enqueue_fair     update_stats_enqueue_dl
    dequeue         update_stats_dequeue_fair     update_stats_dequeue_dl
    put_prev_task   update_stats_wait_start       update_stats_wait_start_dl
    set_next_task   update_stats_wait_end         update_stats_wait_end_dl

The user can get the schedstats information in the same way as for the fair
sched class, for example:

                    fair                          deadline
                    /proc/[pid]/sched             /proc/[pid]/sched

The output of a deadline task's schedstats is as follows:

$ cat /proc/69662/sched
...
se.sum_exec_runtime                          :          3067.696449
se.nr_migrations                             :                    0
sum_sleep_runtime                            :        720144.029661
sum_block_runtime                            :             0.547853
wait_start                                   :             0.000000
sleep_start                                  :      14131540.828955
block_start                                  :             0.000000
sleep_max                                    :          2999.974045
block_max                                    :             0.283637
exec_max                                     :             1.000269
slice_max                                    :             0.000000
wait_max                                     :             0.002217
wait_sum                                     :             0.762179
wait_count                                   :                  733
iowait_sum                                   :             0.547853
iowait_count                                 :                    3
nr_migrations_cold                           :                    0
nr_failed_migrations_affine                  :                    0
nr_failed_migrations_running                 :                    0
nr_failed_migrations_hot                     :                    0
nr_forced_migrations                         :                    0
nr_wakeups                                   :                  246
nr_wakeups_sync                              :                    2
nr_wakeups_migrate                           :                    0
nr_wakeups_local                             :                  244
nr_wakeups_remote                            :                    2
nr_wakeups_affine                            :                    0
nr_wakeups_affine_attempts                   :                    0
nr_wakeups_passive                           :                    0
nr_wakeups_idle                              :                    0
...

The sched:sched_stat_{wait, sleep, iowait, blocked} tracepoints can be used
to trace deadline tasks as well.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210905143547.4668-9-laoar.shao@gmail.com
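[Editor's note] For illustration only, not part of this patch: a minimal
user-space sketch that dumps the schedstats fields above for a given PID by
reading /proc/[pid]/sched. It assumes schedstats are already enabled (e.g.
kernel.sched_schedstats = 1); the program itself is a hypothetical helper.

/* Sketch: print a (deadline) task's /proc/[pid]/sched output, which now
 * also carries schedstats for DL tasks. Assumes schedstats are enabled.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
        char path[64];
        char line[256];
        FILE *f;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                return 1;
        }

        snprintf(path, sizeof(path), "/proc/%s/sched", argv[1]);
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return 1;
        }

        /* Copy the file verbatim; schedstat lines appear only when enabled. */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);

        fclose(f);
        return 0;
}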
parent 95fd58e8da
commit b5eb4a5f65
kernel/sched/deadline.c

@@ -1474,6 +1474,82 @@ static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
 	return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
 }
 
+static inline struct sched_statistics *
+__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
+{
+	return &dl_task_of(dl_se)->stats;
+}
+
+static inline void
+update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
+{
+	struct sched_statistics *stats;
+
+	if (!schedstat_enabled())
+		return;
+
+	stats = __schedstats_from_dl_se(dl_se);
+	__update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+}
+
+static inline void
+update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
+{
+	struct sched_statistics *stats;
+
+	if (!schedstat_enabled())
+		return;
+
+	stats = __schedstats_from_dl_se(dl_se);
+	__update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+}
+
+static inline void
+update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
+{
+	struct sched_statistics *stats;
+
+	if (!schedstat_enabled())
+		return;
+
+	stats = __schedstats_from_dl_se(dl_se);
+	__update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
+}
+
+static inline void
+update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
+			int flags)
+{
+	if (!schedstat_enabled())
+		return;
+
+	if (flags & ENQUEUE_WAKEUP)
+		update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
+}
+
+static inline void
+update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
+			int flags)
+{
+	struct task_struct *p = dl_task_of(dl_se);
+
+	if (!schedstat_enabled())
+		return;
+
+	if ((flags & DEQUEUE_SLEEP)) {
+		unsigned int state;
+
+		state = READ_ONCE(p->__state);
+		if (state & TASK_INTERRUPTIBLE)
+			__schedstat_set(p->stats.sleep_start,
+					rq_clock(rq_of_dl_rq(dl_rq)));
+
+		if (state & TASK_UNINTERRUPTIBLE)
+			__schedstat_set(p->stats.block_start,
+					rq_clock(rq_of_dl_rq(dl_rq)));
+	}
+}
+
 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 {
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
@@ -1504,6 +1580,8 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
 {
 	BUG_ON(on_dl_rq(dl_se));
 
+	update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
+
 	/*
 	 * If this is a wakeup or a new instance, the scheduling
 	 * parameters of the task might need updating. Otherwise,
@@ -1600,6 +1678,9 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 		return;
 	}
 
+	check_schedstat_required();
+	update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
+
 	enqueue_dl_entity(&p->dl, flags);
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
@@ -1608,6 +1689,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 
 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
+	update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
 	dequeue_dl_entity(&p->dl);
 	dequeue_pushable_dl_task(rq, p);
 }
@@ -1827,7 +1909,12 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
 
 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
 {
+	struct sched_dl_entity *dl_se = &p->dl;
+	struct dl_rq *dl_rq = &rq->dl;
+
 	p->se.exec_start = rq_clock_task(rq);
+	if (on_dl_rq(&p->dl))
+		update_stats_wait_end_dl(dl_rq, dl_se);
 
 	/* You can't push away the running task */
 	dequeue_pushable_dl_task(rq, p);
@@ -1884,6 +1971,12 @@ static struct task_struct *pick_next_task_dl(struct rq *rq)
 
 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
+	struct sched_dl_entity *dl_se = &p->dl;
+	struct dl_rq *dl_rq = &rq->dl;
+
+	if (on_dl_rq(&p->dl))
+		update_stats_wait_start_dl(dl_rq, dl_se);
+
 	update_curr_dl(rq);
 
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
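[Editor's note] For illustration only, not part of this patch: a hedged
sketch of enabling the sched:sched_stat_wait tracepoint from user space via
tracefs, so that the wait statistics above can be traced for deadline tasks.
It assumes tracefs is mounted at /sys/kernel/tracing and sufficient
privileges; the other sched_stat_* events can be enabled the same way.

/* Sketch: write 1 to the tracefs enable file for sched:sched_stat_wait.
 * Assumes tracefs at /sys/kernel/tracing and root privileges.
 */
#include <stdio.h>

int main(void)
{
        const char *knob =
                "/sys/kernel/tracing/events/sched/sched_stat_wait/enable";
        FILE *f = fopen(knob, "w");

        if (!f) {
                perror(knob);
                return 1;
        }
        fputs("1\n", f);
        fclose(f);
        return 0;
}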