mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-25 21:54:06 +08:00
91c27493e7
Interrupt and steal time are the only remaining activities tracked by rt_avg. As with the sched classes, we can use PELT to track their average utilization of the CPU. But unlike the sched classes, we don't track when entering/leaving interrupt context; instead, we take into account the time spent under interrupt context when we update the rq's clock (rq_clock_task). This also means that we have to decay the normal-context time and account for the interrupt time during the update.

It is also important to note that, because

  rq_clock == rq_clock_task + interrupt time

and rq_clock_task is used by a sched class to compute its utilization, the util_avg of a sched class only reflects the utilization of the time spent in normal context, not of the whole time of the CPU. Tracking the utilization of interrupts gives a more accurate picture of CPU utilization:

  CPU utilization = avg_irq + (1 - avg_irq / max capacity) * /Sum avg_rq

Most of the time avg_irq is small and negligible, so the approximation CPU utilization = /Sum avg_rq was sufficient.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Cc: viresh.kumar@linaro.org
Link: http://lkml.kernel.org/r/1530200714-4504-7-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
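As a rough illustration of that formula, here is a minimal C sketch of the same arithmetic done in integer capacity units; the helper name cpu_util_total() and its parameters (avg_irq, sum_avg_rq, max_capacity) are assumptions made for this example, not identifiers taken from the kernel:

	/*
	 * CPU utilization = avg_irq + (1 - avg_irq / max capacity) * /Sum avg_rq
	 *
	 * The sched-class contribution is scaled by the fraction of capacity
	 * left over after interrupts, then the interrupt utilization is added.
	 */
	static unsigned long cpu_util_total(unsigned long avg_irq,
					    unsigned long sum_avg_rq,
					    unsigned long max_capacity)
	{
		unsigned long util;

		/* (1 - avg_irq / max_capacity) * sum_avg_rq, in integer math */
		util = sum_avg_rq * (max_capacity - avg_irq) / max_capacity;

		/* avg_irq is already expressed in capacity units */
		util += avg_irq;

		/* Utilization can never exceed the CPU's capacity */
		return util < max_capacity ? util : max_capacity;
	}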
73 lines
1.7 KiB
C
#ifdef CONFIG_SMP

int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif
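
/*
 * Illustrative sketch, not part of pelt.h: update_irq_load_avg() has no
 * enter/leave hooks of its own; the time accounted to interrupt and steal
 * is known where the rq clock is updated, so the call is expected to be
 * driven from update_rq_clock_task(), roughly as:
 *
 *	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
 *		update_irq_load_avg(rq, irq_delta + steal);
 *
 * The exact call site and the NONTASK_CAPACITY feature check are assumptions
 * based on this patch series, not something this header guarantees.
 */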

/*
 * When a task is dequeued, its estimated utilization should not be updated
 * if its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB bit of the utilization saved at
 * dequeue time (i.e. util_est.dequeued).
 */
#define UTIL_AVG_UNCHANGED 0x1

static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid the store if the flag has already been cleared */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Clear the flag to report that util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
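
/*
 * Illustrative sketch, not part of pelt.h: the dequeue side is expected to
 * set UTIL_AVG_UNCHANGED in the LSB of the stored estimate, so util_est is
 * only refreshed again once cfs_se_util_change() has observed a new
 * util_avg update.  Roughly, on the util_est dequeue path in fair.c:
 *
 *	ue.enqueued = task_util(p) | UTIL_AVG_UNCHANGED;
 *	WRITE_ONCE(p->se.avg.util_est, ue);
 *
 * The exact statements are an assumption about the fair.c side, shown only
 * to illustrate how the LSB flag round-trips between dequeue and update.
 */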

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif