sched/fair: Start tracking SCHED_IDLE tasks count in cfs_rq
Track how many tasks are present with SCHED_IDLE policy in each cfs_rq.
This will be used by later commits.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: chris.redpath@arm.com
Cc: quentin.perret@linaro.org
Cc: songliubraving@fb.com
Cc: steven.sistare@oracle.com
Cc: subhra.mazumdar@oracle.com
Cc: tkjos@google.com
Link: https://lkml.kernel.org/r/0d3cdc427fc68808ad5bccc40e86ed0bf9da8bb4.1561523542.git.viresh.kumar@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 84ec3a0787
commit 43e9f7f231
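For context: `task_has_idle_policy(p)`, used in the diff below, is true when a task runs under SCHED_IDLE, the minimal-weight fair-class policy. As a hypothetical illustration (not part of this patch), a userspace task opts into SCHED_IDLE like this:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            /* SCHED_IDLE requires a static priority of 0. */
            struct sched_param param = { .sched_priority = 0 };

            /* pid 0 == the calling task; after this call it is counted in
             * cfs_rq->idle_h_nr_running on every cfs_rq it is enqueued on. */
            if (sched_setscheduler(0, SCHED_IDLE, &param) == -1) {
                    perror("sched_setscheduler");
                    return 1;
            }
            return 0;
    }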
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4555,7 +4555,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
         struct rq *rq = rq_of(cfs_rq);
         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
         struct sched_entity *se;
-        long task_delta, dequeue = 1;
+        long task_delta, idle_task_delta, dequeue = 1;
         bool empty;
 
         se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
@@ -4566,6 +4566,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
         rcu_read_unlock();
 
         task_delta = cfs_rq->h_nr_running;
+        idle_task_delta = cfs_rq->idle_h_nr_running;
         for_each_sched_entity(se) {
                 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
                 /* throttled entity or throttle-on-deactivate */
@@ -4575,6 +4576,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
                 if (dequeue)
                         dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
                 qcfs_rq->h_nr_running -= task_delta;
+                qcfs_rq->idle_h_nr_running -= idle_task_delta;
 
                 if (qcfs_rq->load.weight)
                         dequeue = 0;
@@ -4614,7 +4616,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
         struct sched_entity *se;
         int enqueue = 1;
-        long task_delta;
+        long task_delta, idle_task_delta;
 
         se = cfs_rq->tg->se[cpu_of(rq)];
 
@@ -4634,6 +4636,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
                 return;
 
         task_delta = cfs_rq->h_nr_running;
+        idle_task_delta = cfs_rq->idle_h_nr_running;
         for_each_sched_entity(se) {
                 if (se->on_rq)
                         enqueue = 0;
@@ -4642,6 +4645,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
                 if (enqueue)
                         enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
                 cfs_rq->h_nr_running += task_delta;
+                cfs_rq->idle_h_nr_running += idle_task_delta;
 
                 if (cfs_rq_throttled(cfs_rq))
                         break;
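The throttle/unthrottle paths above move an entire group subtree at once, so both counts are snapshotted once (`task_delta`, `idle_task_delta`) and then applied to every ancestor cfs_rq. A minimal toy model of that propagation, with hypothetical names and none of the kernel's dequeue/throttle bookkeeping:

    /* Toy model, not kernel code: each runqueue keeps a hierarchical
     * task count plus the SCHED_IDLE subset of that count. */
    struct toy_cfs_rq {
            unsigned int h_nr_running;      /* all tasks below this level */
            unsigned int idle_h_nr_running; /* SCHED_IDLE tasks below this level */
            struct toy_cfs_rq *parent;      /* NULL at the root */
    };

    /* Throttling removes the whole subtree from every ancestor's counts;
     * unthrottling is the same walk with += instead of -=. */
    static void toy_throttle(struct toy_cfs_rq *cfs_rq)
    {
            unsigned int task_delta = cfs_rq->h_nr_running;
            unsigned int idle_task_delta = cfs_rq->idle_h_nr_running;
            struct toy_cfs_rq *p;

            for (p = cfs_rq->parent; p; p = p->parent) {
                    p->h_nr_running -= task_delta;
                    p->idle_h_nr_running -= idle_task_delta;
            }
    }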
@@ -5255,6 +5259,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
+        int idle_h_nr_running = task_has_idle_policy(p);
 
         /*
          * The code below (indirectly) updates schedutil which looks at
@@ -5287,6 +5292,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 if (cfs_rq_throttled(cfs_rq))
                         break;
                 cfs_rq->h_nr_running++;
+                cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
                 flags = ENQUEUE_WAKEUP;
         }
@@ -5294,6 +5300,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
         for_each_sched_entity(se) {
                 cfs_rq = cfs_rq_of(se);
                 cfs_rq->h_nr_running++;
+                cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
                 if (cfs_rq_throttled(cfs_rq))
                         break;
@@ -5355,6 +5362,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
         int task_sleep = flags & DEQUEUE_SLEEP;
+        int idle_h_nr_running = task_has_idle_policy(p);
 
         for_each_sched_entity(se) {
                 cfs_rq = cfs_rq_of(se);
@@ -5369,6 +5377,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 if (cfs_rq_throttled(cfs_rq))
                         break;
                 cfs_rq->h_nr_running--;
+                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
                 /* Don't dequeue parent if it has other entities besides us */
                 if (cfs_rq->load.weight) {
@@ -5388,6 +5397,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
         for_each_sched_entity(se) {
                 cfs_rq = cfs_rq_of(se);
                 cfs_rq->h_nr_running--;
+                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
                 if (cfs_rq_throttled(cfs_rq))
                         break;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -483,7 +483,8 @@ struct cfs_rq {
         struct load_weight      load;
         unsigned long           runnable_weight;
         unsigned int            nr_running;
-        unsigned int            h_nr_running;
+        unsigned int            h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
+        unsigned int            idle_h_nr_running; /* SCHED_IDLE */
 
         u64                     exec_clock;
         u64                     min_vruntime;
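By construction, `idle_h_nr_running <= h_nr_running` holds on every cfs_rq. The commit message says the counter "will be used by later commits"; one natural query it enables, sketched here with the hypothetical toy struct rather than this patch's code, is whether a runqueue is busy only with SCHED_IDLE work:

    /* Toy sketch: true when the runqueue has work, but all of it is
     * SCHED_IDLE -- i.e. the CPU is effectively available to anything
     * with a normal policy. */
    static inline int toy_rq_only_sched_idle(const struct toy_cfs_rq *cfs_rq)
    {
            return cfs_rq->h_nr_running &&
                   cfs_rq->h_nr_running == cfs_rq->idle_h_nr_running;
    }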